xpath.py

# SPDX-License-Identifier: AGPL-3.0-or-later

from urllib.parse import urlencode

from lxml import html

from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
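
# Generic XPath engine: the module-level attributes below are defaults that
# the searx engine loader overrides with the values from the engine's entry
# in settings.yml (which is also where `categories` comes from; see the
# example configuration at the end of this file).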
search_url = None
url_xpath = None
content_xpath = None
title_xpath = None
thumbnail_xpath = False
paging = False
suggestion_xpath = ''
results_xpath = ''
cached_xpath = ''
cached_url = ''
soft_max_redirects = 0

# parameters for engines with paging support
#
# number of results on each page
# (only needed if the site expects an offset rather than a page number)
page_size = 1
# number of the first page (usually 0 or 1)
first_page_num = 1
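
# Worked example (assumed values): with page_size = 10 and first_page_num = 0,
# a request for params['pageno'] == 3 is sent as the offset
# (3 - 1) * 10 + 0 == 20, i.e. the site is asked for results starting at 20.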


def request(query, params):
    # urlencode({'q': query}) returns 'q=<escaped query>'; strip the leading
    # 'q=' so only the escaped query itself is substituted into search_url
    query = urlencode({'q': query})[2:]

    fp = {'query': query}
    if paging and search_url.find('{pageno}') >= 0:
        # translate searx's 1-based page number into the value the site
        # expects (a page number or an offset, depending on page_size)
        fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num

    params['url'] = search_url.format(**fp)
    params['query'] = query
    params['soft_max_redirects'] = soft_max_redirects

    return params
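
# Example (hypothetical values): with
#   search_url = 'https://example.org/search?q={query}&p={pageno}'
# a search for "foo bar" on page 2 (page_size = 1, first_page_num = 1) yields
#   params['url'] == 'https://example.org/search?q=foo+bar&p=2'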


def response(resp):
    results = []
    dom = html.fromstring(resp.text)
    # `categories` is not defined in this module; the engine loader injects it
    # from settings.yml before response() is ever called
    is_onion = 'onions' in categories  # pylint: disable=undefined-variable
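
    # Two parsing modes: when results_xpath is set, each result is parsed from
    # its own container node and the other expressions are evaluated relative
    # to it; otherwise the url/title/content node lists are extracted from the
    # whole document independently and zipped together, so they must all match
    # the same number of nodes, in the same order.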
    if results_xpath:
        for result in eval_xpath_list(dom, results_xpath):
            url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
            title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
            content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))
            tmp_result = {'url': url, 'title': title, 'content': content}

            # add thumbnail if available
            if thumbnail_xpath:
                thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
                if len(thumbnail_xpath_result) > 0:
                    tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)

            # add alternative cached url if available
            if cached_xpath:
                tmp_result['cached_url'] = cached_url + extract_text(
                    eval_xpath_list(result, cached_xpath, min_len=1)
                )

            if is_onion:
                tmp_result['is_onion'] = True

            results.append(tmp_result)

    else:
        if cached_xpath:
            for url, title, content, cached in zip(
                (extract_url(x, search_url) for x in eval_xpath_list(dom, url_xpath)),
                map(extract_text, eval_xpath_list(dom, title_xpath)),
                map(extract_text, eval_xpath_list(dom, content_xpath)),
                map(extract_text, eval_xpath_list(dom, cached_xpath)),
            ):
                results.append({
                    'url': url,
                    'title': title,
                    'content': content,
                    'cached_url': cached_url + cached,
                    'is_onion': is_onion,
                })
        else:
            for url, title, content in zip(
                (extract_url(x, search_url) for x in eval_xpath_list(dom, url_xpath)),
                map(extract_text, eval_xpath_list(dom, title_xpath)),
                map(extract_text, eval_xpath_list(dom, content_xpath)),
            ):
                results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})

    if not suggestion_xpath:
        return results

    for suggestion in eval_xpath(dom, suggestion_xpath):
        results.append({'suggestion': extract_text(suggestion)})

    return results
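

# A minimal sketch of how an engine built on this module is configured in
# settings.yml (values are illustrative, not a real engine):
#
#   - name: example
#     engine: xpath
#     paging: True
#     search_url: https://example.org/search?q={query}&p={pageno}
#     results_xpath: //div[@class="result"]
#     url_xpath: .//a/@href
#     title_xpath: .//a
#     content_xpath: .//p[@class="snippet"]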