www1x.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""1x (Images)

"""

from urllib.parse import urlencode, urljoin
from lxml import html, etree
from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex

# about
about = {
    "website": 'https://1x.com/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['images']
paging = False

# search-url
base_url = 'https://1x.com'
search_url = base_url + '/backend/search.php?{query}'
gallery_url = 'https://gallery.1x.com/'


# do search-request
def request(query, params):
    params['url'] = search_url.format(query=urlencode({'q': query}))
    return params
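
# Illustrative example (not part of the engine itself): a query such as "forest"
# sets params['url'] to 'https://1x.com/backend/search.php?q=forest', which the
# SearXNG network layer then fetches before handing the response to response().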


# get response from search-request
def response(resp):
    results = []

    # The backend answers with an XML document; the text of its <data> node is
    # an HTML fragment containing the result links as <a> elements.
    xmldom = etree.fromstring(resp.content)
    xmlsearchresult = eval_xpath_getindex(xmldom, '//data', 0)
    dom = html.fragment_fromstring(xmlsearchresult.text, create_parent='div')

    for link in eval_xpath_list(dom, '//a'):
        url = urljoin(base_url, link.attrib.get('href'))
        title = extract_text(link)

        # Rebase the thumbnail path onto the gallery host, stripping any
        # absolute 1x.com prefix from the <img> src first.
        thumbnail_src = urljoin(
            gallery_url, (eval_xpath_getindex(link, './/img', 0).attrib['src']).replace(base_url, '')
        )

        # append result
        results.append(
            {
                'url': url,
                'title': title,
                'img_src': thumbnail_src,
                'content': '',
                'thumbnail_src': thumbnail_src,
                'template': 'images.html',
            }
        )

    # return results
    return results
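
# Rough offline sketch of the assumed response shape (hypothetical payload, not
# taken from any 1x.com documentation): the <data> element carries an escaped
# HTML fragment whose anchors become image results.
#
#   class _FakeResp:  # minimal stand-in for the response object SearXNG passes in
#       content = (
#           b'<root><data>&lt;a href="/photo/123"&gt;'
#           b'&lt;img src="/gallery/thumb.jpg"/&gt;Example&lt;/a&gt;</data></root>'
#       )
#
#   response(_FakeResp())
#   # -> [{'url': 'https://1x.com/photo/123', 'title': 'Example',
#   #      'img_src': 'https://gallery.1x.com/gallery/thumb.jpg', ...}]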