xpath.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
# pylint: disable=missing-function-docstring
"""The XPath engine is a *generic* engine with which it is possible to configure
engines in the settings.

Here is a simple example of an XPath engine configured in the
:ref:`settings engine` section; for further reading see :ref:`engines-dev`.

.. code:: yaml

  - name : bitbucket
    engine : xpath
    paging : True
    search_url : https://bitbucket.org/repo/all/{pageno}?name={query}
    url_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]/@href
    title_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]
    content_xpath : //article[@class="repo-summary"]/p

"""

from urllib.parse import urlencode

from lxml import html
from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
from searx import logger

logger = logger.getChild('XPath engine')

search_url = None
"""
Search URL of the engine.  Replacements are:

``{query}``:
  Search terms from user.

``{pageno}``:
  Page number if the engine supports paging :py:obj:`paging`

``{lang}``:
  ISO 639-1 language code (en, de, fr ..)
"""

lang_all = 'en'
'''Replacement ``{lang}`` in :py:obj:`search_url` if language ``all`` is
selected.
'''

soft_max_redirects = 0
'''Maximum redirects, soft limit. Record an error but don't stop the engine.'''

results_xpath = ''
'''XPath selector for the list of result items.  If set, the ``url_xpath``,
``title_xpath`` and ``content_xpath`` selectors are evaluated relative to each
result node.'''

url_xpath = None
'''XPath selector of result's ``url``.'''

content_xpath = None
'''XPath selector of result's ``content``.'''

title_xpath = None
'''XPath selector of result's ``title``.'''

thumbnail_xpath = False
'''XPath selector of result's ``img_src``.'''

suggestion_xpath = ''
'''XPath selector of result's ``suggestion``.'''

cached_xpath = ''
'''XPath selector of a link (or text) pointing to a cached copy of the
result.'''

cached_url = ''
'''Prefix prepended to the text extracted by :py:obj:`cached_xpath` to build
the result's ``cached_url``.'''
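
# A hedged sketch of the ``cached_*`` settings in a YAML engine config (the
# URL and XPath are made up for illustration):
#
#   cached_url : https://cachedview.example/?url=
#   cached_xpath : .//a[@class="cache-link"]/@href
#
# The text extracted by ``cached_xpath`` is appended to ``cached_url`` to
# form each result's ``cached_url``.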

paging = False
'''Engine supports paging [True or False].'''

page_size = 1
'''Number of results on each page.  Only needed if the site requires not a
page number, but an offset.'''

first_page_num = 1
'''Number of the first page (usually 0 or 1).'''
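
# With the defaults (page_size = 1, first_page_num = 1) the ``{pageno}``
# replacement is the plain page number: page 1 -> 1, page 2 -> 2.  Configured
# as an offset, e.g. with page_size = 10 and first_page_num = 0 (illustrative
# values), it becomes: page 1 -> 0, page 2 -> 10, page 3 -> 20; see the
# computation in request() below.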


def request(query, params):
    '''Build request parameters (see :ref:`engine request`).'''

    lang = lang_all
    if params['language'] != 'all':
        lang = params['language'][:2]

    fargs = {
        # urlencode({'q': query}) yields 'q=<encoded terms>'; strip the
        # leading 'q=' to keep only the URL-encoded search terms
        'query': urlencode({'q': query})[2:],
        'lang': lang,
        'pageno': (params['pageno'] - 1) * page_size + first_page_num
    }

    params['url'] = search_url.format(**fargs)
    params['soft_max_redirects'] = soft_max_redirects

    logger.debug("query_url --> %s", params['url'])
    return params
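
# A minimal sketch of how the engine core drives request(); the values below
# are illustrative and assume the bitbucket example from the module docstring:
#
#   params = {'language': 'de-DE', 'pageno': 2}
#   request('searx', params)
#   # params['url'] == 'https://bitbucket.org/repo/all/2?name=searx'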


def response(resp):
    '''Scrape *results* from the response (see :ref:`engine results`).'''
    results = []
    dom = html.fromstring(resp.text)
    # 'categories' is set on this module by the engine loader from the
    # engine's YAML settings
    is_onion = 'onions' in categories  # pylint: disable=undefined-variable

    if results_xpath:
        # select each result as a node, then evaluate the per-result
        # selectors relative to that node
        for result in eval_xpath_list(dom, results_xpath):

            url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
            title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
            content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))
            tmp_result = {'url': url, 'title': title, 'content': content}

            # add thumbnail if available
            if thumbnail_xpath:
                thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
                if len(thumbnail_xpath_result) > 0:
                    tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)

            # add alternative cached url if available
            if cached_xpath:
                tmp_result['cached_url'] = (
                    cached_url
                    + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
                )

            if is_onion:
                tmp_result['is_onion'] = True

            results.append(tmp_result)

    else:
        # without results_xpath, evaluate each selector on the whole page and
        # pair the extracted values positionally
        if cached_xpath:
            for url, title, content, cached in zip(
                    (extract_url(x, search_url) for
                     x in eval_xpath_list(dom, url_xpath)),
                    map(extract_text, eval_xpath_list(dom, title_xpath)),
                    map(extract_text, eval_xpath_list(dom, content_xpath)),
                    map(extract_text, eval_xpath_list(dom, cached_xpath))
            ):
                results.append({
                    'url': url,
                    'title': title,
                    'content': content,
                    'cached_url': cached_url + cached,
                    'is_onion': is_onion
                })
        else:
            for url, title, content in zip(
                    (extract_url(x, search_url) for
                     x in eval_xpath_list(dom, url_xpath)),
                    map(extract_text, eval_xpath_list(dom, title_xpath)),
                    map(extract_text, eval_xpath_list(dom, content_xpath))
            ):
                results.append({
                    'url': url,
                    'title': title,
                    'content': content,
                    'is_onion': is_onion
                })

    if suggestion_xpath:
        for suggestion in eval_xpath(dom, suggestion_xpath):
            results.append({'suggestion': extract_text(suggestion)})

    logger.debug("found %s results", len(results))
    return results
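
# A minimal sketch of the positional pairing done by the zip-based fallback
# above, with illustrative values:
#
#   urls = ['https://a.example/', 'https://b.example/']
#   titles = ['A', 'B']
#   contents = ['first', 'second']
#   list(zip(urls, titles, contents))
#   # -> [('https://a.example/', 'A', 'first'),
#   #     ('https://b.example/', 'B', 'second')]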