# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Imgur (images)
"""
from urllib.parse import urlencode

from lxml import html

from searx.utils import extract_text, eval_xpath, eval_xpath_list

# Engine metadata shown by searx in the engine's "about" section.
about = {
    "website": 'https://imgur.com/',
    "wikidata_id": 'Q107565255',
    "official_api_documentation": 'https://api.imgur.com/',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# Engine configuration read by the searx engine loader.
categories = ['images']
paging = True
time_range_support = True

base_url = "https://imgur.com"

# XPath expressions used to scrape the HTML result page:
# each result card, its link, title (img alt text) and preview image.
results_xpath = "//div[contains(@class, 'cards')]/div[contains(@class, 'post')]"
url_xpath = "./a/@href"
title_xpath = "./a/img/@alt"
thumbnail_xpath = "./a/img/@src"
  24. def request(query, params):
  25. time_range = params['time_range'] or 'all'
  26. args = {
  27. 'q': query,
  28. 'qs': 'thumbs',
  29. 'p': params['pageno'] - 1,
  30. }
  31. params['url'] = f"{base_url}/search/score/{time_range}?{urlencode(args)}"
  32. return params
  33. def response(resp):
  34. results = []
  35. dom = html.fromstring(resp.text)
  36. for result in eval_xpath_list(dom, results_xpath):
  37. thumbnail_src = extract_text(eval_xpath(result, thumbnail_xpath))
  38. img_src = thumbnail_src.replace("b.", ".")
  39. # that's a bug at imgur's side:
  40. # sometimes there's just no preview image, hence we skip the image
  41. if len(thumbnail_src) < 25:
  42. continue
  43. results.append(
  44. {
  45. 'template': 'images.html',
  46. 'url': base_url + extract_text(eval_xpath(result, url_xpath)),
  47. 'title': extract_text(eval_xpath(result, title_xpath)),
  48. 'img_src': img_src,
  49. 'thumbnail_src': thumbnail_src,
  50. }
  51. )
  52. return results