# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
# pylint: disable=missing-function-docstring
"""The XPath engine is a *generic* engine with which it is possible to configure
engines in the settings.

Here is a simple example of an XPath engine configured in the
:ref:`settings engine` section; for further reading see :ref:`engines-dev`.

.. code:: yaml

  - name : bitbucket
    engine : xpath
    paging : True
    search_url : https://bitbucket.org/repo/all/{pageno}?name={query}
    url_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]/@href
    title_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]
    content_xpath : //article[@class="repo-summary"]/p

"""

from urllib.parse import urlencode

from lxml import html

from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
from searx import logger

logger = logger.getChild('XPath engine')

search_url = None
"""
Search URL of the engine.  Example::

    https://example.org/?search={query}&page={pageno}{time_range}{safe_search}

Replacements are:

``{query}``:
  Search terms from user.

``{pageno}``:
  Page number if engine supports paging :py:obj:`paging`

``{lang}``:
  ISO 639-1 language code (en, de, fr ..)

``{time_range}``:
  :py:obj:`URL parameter <time_range_url>` if engine :py:obj:`supports time
  range <time_range_support>`.  The value for the parameter is taken from
  :py:obj:`time_range_map`.

``{safe_search}``:
  Safe-search :py:obj:`URL parameter <safe_search_map>` if engine
  :py:obj:`supports safe-search <safe_search_support>`.  The ``{safe_search}``
  replacement is taken from the :py:obj:`safe_search_map`.  Filter results::

      0: none, 1: moderate, 2: strict

  If not supported, the URL parameter is an empty string.
"""

lang_all = 'en'
'''Replacement ``{lang}`` in :py:obj:`search_url` if language ``all`` is
selected.
'''

soft_max_redirects = 0
'''Maximum redirects, soft limit.  Record an error but don't stop the engine.'''

results_xpath = ''
'''XPath selector for the list of result items.'''

url_xpath = None
'''XPath selector of result's ``url``.'''

content_xpath = None
'''XPath selector of result's ``content``.'''

title_xpath = None
'''XPath selector of result's ``title``.'''

thumbnail_xpath = False
'''XPath selector of result's ``img_src``.'''

suggestion_xpath = ''
'''XPath selector of result's ``suggestion``.'''

cached_xpath = ''
'''XPath selector of the result's cached-page link (optional).'''

cached_url = ''
'''Prefix prepended to the text selected by :py:obj:`cached_xpath` to build a
result's ``cached_url``.'''

paging = False
'''Engine supports paging [True or False].'''

page_size = 1
'''Number of results on each page.  Only needed if the site expects a result
offset rather than a page number.'''

first_page_num = 1
'''Number of the first page (usually 0 or 1).'''
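
# For a site that expects an *offset* instead of a page number, e.g. the
# hypothetical ``search_url : https://example.org/?q={query}&start={pageno}``
# with ``page_size : 10`` and ``first_page_num : 0``, user pages 1, 2, 3 are
# mapped to offsets 0, 10, 20 by the ``pageno`` computation in request().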

time_range_support = False
'''Engine supports search time range.'''

time_range_url = '&hours={time_range_val}'
'''Time range URL parameter in the :py:obj:`search_url`.  If no time range is
requested by the user, the URL parameter is an empty string.  The
``{time_range_val}`` replacement is taken from the :py:obj:`time_range_map`.

.. code:: yaml

    time_range_url : '&days={time_range_val}'
'''

time_range_map = {
    'day': 24,
    'week': 24*7,
    'month': 24*30,
    'year': 24*365,
}
'''Maps time range value from user to ``{time_range_val}`` in
:py:obj:`time_range_url`.

.. code:: yaml

    time_range_map:
      day: 1
      week: 7
      month: 30
      year: 365
'''
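
# Example: with the defaults above, a user-selected time range ``week`` turns
# ``{time_range}`` into ``&hours=168``, via
# ``time_range_url.format(time_range_val=time_range_map['week'])``.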

safe_search_support = False
'''Engine supports safe-search.'''

safe_search_map = {
    0: '&filter=none',
    1: '&filter=moderate',
    2: '&filter=strict',
}
'''Maps safe-search value to ``{safe_search}`` in :py:obj:`search_url`.

.. code:: yaml

    safesearch: true
    safe_search_map:
      0: '&filter=none'
      1: '&filter=moderate'
      2: '&filter=strict'
'''


def request(query, params):
    '''Build request parameters (see :ref:`engine request`).'''

    lang = lang_all
    if params['language'] != 'all':
        lang = params['language'][:2]

    time_range = ''
    if params.get('time_range'):
        time_range_val = time_range_map.get(params.get('time_range'))
        time_range = time_range_url.format(time_range_val=time_range_val)

    safe_search = ''
    if params['safesearch']:
        safe_search = safe_search_map[params['safesearch']]

    fargs = {
        'query': urlencode({'q': query})[2:],
        'lang': lang,
        'pageno': (params['pageno'] - 1) * page_size + first_page_num,
        'time_range': time_range,
        'safe_search': safe_search,
    }

    params['url'] = search_url.format(**fargs)
    params['soft_max_redirects'] = soft_max_redirects

    logger.debug("query_url --> %s", params['url'])
    return params
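
# A worked example of the substitution in request(), assuming the hypothetical
# settings ``search_url = 'https://example.org/?search={query}&page={pageno}'``
# with ``paging = True`` and the defaults above: the query ``foo bar`` on page
# 2 yields ``https://example.org/?search=foo+bar&page=2``.  The ``[2:]`` in the
# ``query`` entry strips the leading ``q=`` that urlencode() prepends.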


def response(resp):
    '''Scrape *results* from the response (see :ref:`engine results`).'''
    results = []
    dom = html.fromstring(resp.text)
    is_onion = 'onions' in categories  # pylint: disable=undefined-variable

    if results_xpath:
        for result in eval_xpath_list(dom, results_xpath):

            url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
            title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
            content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))
            tmp_result = {'url': url, 'title': title, 'content': content}

            # add thumbnail if available
            if thumbnail_xpath:
                thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
                if len(thumbnail_xpath_result) > 0:
                    tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)

            # add alternative cached url if available
            if cached_xpath:
                tmp_result['cached_url'] = (
                    cached_url
                    + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
                )

            if is_onion:
                tmp_result['is_onion'] = True

            results.append(tmp_result)

    else:
        if cached_xpath:
            for url, title, content, cached in zip(
                    (extract_url(x, search_url) for
                     x in eval_xpath_list(dom, url_xpath)),
                    map(extract_text, eval_xpath_list(dom, title_xpath)),
                    map(extract_text, eval_xpath_list(dom, content_xpath)),
                    map(extract_text, eval_xpath_list(dom, cached_xpath))
            ):
                results.append({
                    'url': url,
                    'title': title,
                    'content': content,
                    'cached_url': cached_url + cached,
                    'is_onion': is_onion,
                })
        else:
            for url, title, content in zip(
                    (extract_url(x, search_url) for
                     x in eval_xpath_list(dom, url_xpath)),
                    map(extract_text, eval_xpath_list(dom, title_xpath)),
                    map(extract_text, eval_xpath_list(dom, content_xpath))
            ):
                results.append({
                    'url': url,
                    'title': title,
                    'content': content,
                    'is_onion': is_onion,
                })

    if suggestion_xpath:
        for suggestion in eval_xpath(dom, suggestion_xpath):
            results.append({'suggestion': extract_text(suggestion)})

    logger.debug("found %s results", len(results))
    return results
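
# A hedged sketch of the alternative configuration that uses ``results_xpath``
# to select each result item first, with the field selectors then evaluated
# *relative* to that item (all values illustrative, not from a real site):
#
#   - name : example
#     engine : xpath
#     search_url : https://example.org/?q={query}
#     results_xpath : //div[@class="result"]
#     url_xpath : ./h3/a/@href
#     title_xpath : ./h3/a
#     content_xpath : ./p[@class="snippet"]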