presearch.py 8.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Presearch supports the search types listed in :py:obj:`search_type` (general,
images, videos, news).

Configured ``presearch`` engines:

.. code:: yaml

  - name: presearch
    engine: presearch
    search_type: search
    categories: [general, web]

  - name: presearch images
    ...
    search_type: images
    categories: [images, web]

  - name: presearch videos
    ...
    search_type: videos
    categories: [general, web]

  - name: presearch news
    ...
    search_type: news
    categories: [news, web]

.. hint::

   By default Presearch's video category is intentionally placed into::

       categories: [general, web]


Search type ``video``
=====================

The results in the video category are most often links to pages that contain a
video, for instance many links from Presearch's video category link content
from facebook (aka Meta) or Twitter (aka X).  Since these are not real links to
video streams SearXNG can't use the video template for this and if SearXNG
can't use this template, then the user doesn't want to see these hits in the
videos category.


Languages & Regions
===================

In Presearch there are languages for the UI and regions for narrowing down the
search.  If we set "auto" for the region in the WEB-UI of Presearch and cookie
``use_local_search_results=false``, then the defaults are set for both (the
language and the region) from the ``Accept-Language`` header.

Since the region is already "auto" by default, we only need to set the
``use_local_search_results`` cookie and send the ``Accept-Language`` header.
We have to set these values in both requests we send to Presearch; in the
first request to get the request-ID from Presearch and in the final request to
get the result list (see ``send_accept_language_header``).


Implementations
===============

"""
  48. from urllib.parse import urlencode
  49. from searx import locales
  50. from searx.network import get
  51. from searx.utils import gen_useragent, html_to_text
  52. about = {
  53. "website": "https://presearch.io",
  54. "wikidiata_id": "Q7240905",
  55. "official_api_documentation": "https://docs.presearch.io/nodes/api",
  56. "use_official_api": False,
  57. "require_api_key": False,
  58. "results": "JSON",
  59. }
  60. paging = True
  61. safesearch = True
  62. time_range_support = True
  63. send_accept_language_header = True
  64. categories = ["general", "web"] # general, images, videos, news
  65. search_type = "search"
  66. """must be any of ``search``, ``images``, ``videos``, ``news``"""
  67. base_url = "https://presearch.com"
  68. safesearch_map = {0: 'false', 1: 'true', 2: 'true'}
  69. def init(_):
  70. if search_type not in ['search', 'images', 'videos', 'news']:
  71. raise ValueError(f'presearch search_type: {search_type}')
  72. def _get_request_id(query, params):
  73. args = {
  74. "q": query,
  75. "page": params["pageno"],
  76. }
  77. if params["time_range"]:
  78. args["time"] = params["time_range"]
  79. url = f"{base_url}/{search_type}?{urlencode(args)}"
  80. headers = {
  81. 'User-Agent': gen_useragent(),
  82. 'Cookie': (
  83. f"b=1;"
  84. f" presearch_session=;"
  85. f" use_local_search_results=false;"
  86. f" use_safe_search={safesearch_map[params['safesearch']]}"
  87. ),
  88. }
  89. if params['searxng_locale'] != 'all':
  90. l = locales.get_locale(params['searxng_locale'])
  91. # Presearch narrows down the search by region. In SearXNG when the user
  92. # does not set a region (e.g. 'en-CA' / canada) we cannot hand over a
  93. # region.
  94. # We could possibly use searx.locales.get_official_locales to determine
  95. # in which regions this language is an official one, but then we still
  96. # wouldn't know which region should be given more weight / Presearch
  97. # performs an IP-based geolocation of the user, we don't want that in
  98. # SearXNG ;-)
  99. if l.territory:
  100. headers['Accept-Language'] = f"{l.language}-{l.territory},{l.language};" "q=0.9,*;" "q=0.5"
  101. resp_text = get(url, headers=headers).text # type: ignore
  102. for line in resp_text.split("\n"):
  103. if "window.searchId = " in line:
  104. return line.split("= ")[1][:-1].replace('"', "")
  105. return None
  106. def request(query, params):
  107. request_id = _get_request_id(query, params)
  108. params["headers"]["Accept"] = "application/json"
  109. params["url"] = f"{base_url}/results?id={request_id}"
  110. return params
  111. def _strip_leading_strings(text):
  112. for x in ['wikipedia', 'google']:
  113. if text.lower().endswith(x):
  114. text = text[: -len(x)]
  115. return text.strip()
  116. def parse_search_query(json_results):
  117. results = []
  118. for item in json_results.get('specialSections', {}).get('topStoriesCompact', {}).get('data', []):
  119. result = {
  120. 'url': item['link'],
  121. 'title': item['title'],
  122. 'img_src': item['image'],
  123. 'content': '',
  124. 'metadata': item.get('source'),
  125. }
  126. results.append(result)
  127. for item in json_results.get('standardResults', []):
  128. result = {
  129. 'url': item['link'],
  130. 'title': item['title'],
  131. 'content': html_to_text(item['description']),
  132. }
  133. results.append(result)
  134. info = json_results.get('infoSection', {}).get('data')
  135. if info:
  136. attributes = []
  137. for item in info.get('about', []):
  138. text = html_to_text(item)
  139. if ':' in text:
  140. # split text into key / value
  141. label, value = text.split(':', 1)
  142. else:
  143. # In other languages (tested with zh-TW) a colon is represented
  144. # by a different symbol --> then we split at the first space.
  145. label, value = text.split(' ', 1)
  146. label = label[:-1]
  147. value = _strip_leading_strings(value)
  148. attributes.append({'label': label, 'value': value})
  149. content = []
  150. for item in [info.get('subtitle'), info.get('description')]:
  151. if not item:
  152. continue
  153. item = _strip_leading_strings(html_to_text(item))
  154. if item:
  155. content.append(item)
  156. results.append(
  157. {
  158. 'infobox': info['title'],
  159. 'id': info['title'],
  160. 'img_src': info.get('image'),
  161. 'content': ' | '.join(content),
  162. 'attributes': attributes,
  163. }
  164. )
  165. return results
  166. def response(resp):
  167. results = []
  168. json_resp = resp.json()
  169. if search_type == 'search':
  170. results = parse_search_query(json_resp.get('results'))
  171. elif search_type == 'images':
  172. for item in json_resp.get('images', []):
  173. results.append(
  174. {
  175. 'template': 'images.html',
  176. 'title': item['title'],
  177. 'url': item.get('link'),
  178. 'img_src': item.get('image'),
  179. 'thumbnail_src': item.get('thumbnail'),
  180. }
  181. )
  182. elif search_type == 'videos':
  183. # The results in the video category are most often links to pages that contain
  184. # a video and not to a video stream --> SearXNG can't use the video template.
  185. for item in json_resp.get('videos', []):
  186. metadata = [x for x in [item.get('description'), item.get('duration')] if x]
  187. results.append(
  188. {
  189. 'title': item['title'],
  190. 'url': item.get('link'),
  191. 'content': '',
  192. 'metadata': ' / '.join(metadata),
  193. 'img_src': item.get('image'),
  194. }
  195. )
  196. elif search_type == 'news':
  197. for item in json_resp.get('news', []):
  198. metadata = [x for x in [item.get('source'), item.get('time')] if x]
  199. results.append(
  200. {
  201. 'title': item['title'],
  202. 'url': item.get('link'),
  203. 'content': item.get('description', ''),
  204. 'metadata': ' / '.join(metadata),
  205. 'img_src': item.get('image'),
  206. }
  207. )
  208. return results