presearch.py
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Presearch (general, images, videos, news)

.. hint::

   The results in the video category are most often links to pages that
   contain a video, for instance many links from presearch's video category
   link content from facebook (aka Meta) or Twitter (aka X).  Since these are
   not real links to video streams, SearXNG can't use the video template for
   them, and if SearXNG can't use this template, the user doesn't want to see
   these hits in the videos category.

   TL;DR: by default presearch's video category is placed into categories::

       categories: [general, web]

"""
from urllib.parse import urlencode

from searx.network import get
from searx.utils import gen_useragent, html_to_text

about = {
    "website": "https://presearch.io",
    "wikidata_id": "Q7240905",
    "official_api_documentation": "https://docs.presearch.io/nodes/api",
    "use_official_api": False,
    "require_api_key": False,
    "results": "JSON",
}
paging = True
safesearch = True
time_range_support = True
categories = ["general", "web"]  # general, images, videos, news

search_type = "search"
"""must be any of ``search``, ``images``, ``videos``, ``news``"""

base_url = "https://presearch.com"

# map SearXNG's safesearch levels (0=off, 1=moderate, 2=strict) onto the
# boolean ``use_safe_search`` cookie read by the Presearch frontend
safesearch_map = {0: 'false', 1: 'true', 2: 'true'}

def init(_):
    if search_type not in ['search', 'images', 'videos', 'news']:
        raise ValueError(f'presearch search_type: {search_type}')

def _get_request_id(query, page, time_range, safesearch_param):
    """Fetch the HTML search page and extract the ``window.searchId`` token
    that the JSON results endpoint expects."""
    args = {
        "q": query,
        "page": page,
    }
    if time_range:
        args["time"] = time_range

    url = f"{base_url}/{search_type}?{urlencode(args)}"
    headers = {
        'User-Agent': gen_useragent(),
        'Cookie': f"b=1;presearch_session=;use_safe_search={safesearch_map[safesearch_param]}",
    }
    resp_text = get(url, headers=headers).text  # type: ignore

    for line in resp_text.split("\n"):
        if "window.searchId = " in line:
            return line.split("= ")[1][:-1].replace('"', "")

    return None
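
# Illustrative line matched by _get_request_id() in the HTML page (the exact
# format is an assumption based on the current Presearch frontend):
#
#   window.searchId = "0123abcd-ef45-...";
#
# split("= ")[1] yields '"0123abcd-ef45-...";', [:-1] drops the trailing ';'
# and replace('"', "") removes the quotes.
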
def request(query, params):
    request_id = _get_request_id(query, params["pageno"], params["time_range"], params["safesearch"])

    params["headers"]["Accept"] = "application/json"
    params["url"] = f"{base_url}/results?id={request_id}"

    return params
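
# Illustrative follow-up request produced by request() (headers other than
# Accept are filled in by SearXNG itself):
#
#   GET https://presearch.com/results?id=<searchId>
#   Accept: application/json
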
def _strip_leading_strings(text):
    # despite the name, this strips *trailing* source names from infobox text
    for x in ['wikipedia', 'google']:
        if text.lower().endswith(x):
            text = text[: -len(x)]
    return text.strip()
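
# Example (hypothetical input): an infobox value scraped from the page such
# as 'A decentralized search engine. Wikipedia' becomes
# 'A decentralized search engine.' after _strip_leading_strings().
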
def parse_search_query(json_results):
    results = []

    for item in json_results.get('specialSections', {}).get('topStoriesCompact', {}).get('data', []):
        result = {
            'url': item['link'],
            'title': item['title'],
            'img_src': item['image'],
            'content': '',
            'metadata': item.get('source'),
        }
        results.append(result)

    for item in json_results.get('standardResults', []):
        result = {
            'url': item['link'],
            'title': item['title'],
            'content': html_to_text(item['description']),
        }
        results.append(result)

    info = json_results.get('infoSection', {}).get('data')
    if info:
        attributes = []
        for item in info.get('about', []):
            label, value = html_to_text(item).split(':', 1)
            value = _strip_leading_strings(value)
            attributes.append({'label': label, 'value': value})

        content = []
        for item in [info['subtitle'], info['description']]:
            item = _strip_leading_strings(html_to_text(item))
            if item:
                content.append(item)

        results.append(
            {
                'infobox': info['title'],
                'id': info['title'],
                'img_src': info.get('image'),
                'content': ' | '.join(content),
                'attributes': attributes,
            }
        )
    return results
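
# Rough (assumed) shape of the JSON consumed by parse_search_query(), inferred
# only from the keys accessed above:
#
#   {
#     "specialSections": {"topStoriesCompact": {"data": [
#         {"link": ..., "title": ..., "image": ..., "source": ...}]}},
#     "standardResults": [{"link": ..., "title": ..., "description": ...}],
#     "infoSection": {"data": {"title": ..., "subtitle": ..., "description": ...,
#         "image": ..., "about": [...]}}
#   }
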
def response(resp):
    results = []
    json_resp = resp.json()

    if search_type == 'search':
        results = parse_search_query(json_resp['results'])

    elif search_type == 'images':
        for item in json_resp.get('images', []):
            results.append(
                {
                    'template': 'images.html',
                    'title': item['title'],
                    'url': item['link'],
                    'img_src': item['image'],
                    'thumbnail_src': item['thumbnail'],
                }
            )

    elif search_type == 'videos':
        # The results in the video category are most often links to pages that
        # contain a video and not to a video stream --> SearXNG can't use the
        # video template.
        for item in json_resp.get('videos', []):
            metadata = [x for x in [item.get('description'), item.get('duration')] if x]
            results.append(
                {
                    'title': item['title'],
                    'url': item['link'],
                    'content': '',
                    'metadata': ' / '.join(metadata),
                    'img_src': item.get('image'),
                }
            )

    elif search_type == 'news':
        for item in json_resp.get('news', []):
            metadata = [x for x in [item.get('source'), item.get('time')] if x]
            results.append(
                {
                    'title': item['title'],
                    'url': item['link'],
                    'content': item['description'],
                    'metadata': ' / '.join(metadata),
                    'img_src': item.get('image'),
                }
            )
    return results
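

if __name__ == "__main__":
    # Hedged smoke test, not used by SearXNG: it replays the two-step flow
    # above with plain ``requests`` so the scraping can be checked from a
    # shell.  The ``requests`` dependency, the cookie format, and the JSON
    # keys are assumptions mirroring the code above, not a documented API.
    import sys
    import requests

    _query = " ".join(sys.argv[1:]) or "searxng"
    _headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0',
        'Cookie': "b=1;presearch_session=;use_safe_search=true",
    }
    # step 1: fetch the HTML page and extract window.searchId
    _html = requests.get(f"{base_url}/search?{urlencode({'q': _query})}", headers=_headers, timeout=10).text
    _search_id = None
    for _line in _html.split("\n"):
        if "window.searchId = " in _line:
            _search_id = _line.split("= ")[1][:-1].replace('"', "")
            break
    if not _search_id:
        sys.exit("no window.searchId found -- the frontend may have changed")

    # step 2: fetch the JSON results with the extracted id
    _headers['Accept'] = 'application/json'
    _json = requests.get(f"{base_url}/results?id={_search_id}", headers=_headers, timeout=10).json()
    print("top-level keys:", list(_json))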