# results.py
import re
from collections import defaultdict
from operator import itemgetter
from threading import RLock
from typing import List, NamedTuple, Set
from urllib.parse import urlparse, unquote

from searx import logger
from searx import utils
from searx.engines import engines
from searx.metrics import histogram_observe, counter_add, count_error

# characters that do not count toward a result's meaningful content length;
# the '-' is placed last so it is a literal and not an accidental character range
CONTENT_LEN_IGNORED_CHARS_REGEX = re.compile(r'[,;:!?\./\\\\ ()_-]', re.M | re.U)
WHITESPACE_REGEX = re.compile('( |\t|\n)+', re.M | re.U)


# return the meaningful length of the content for a result
def result_content_len(content):
    if isinstance(content, str):
        return len(CONTENT_LEN_IGNORED_CHARS_REGEX.sub('', content))
    return 0
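
# For illustration (not part of the original module): punctuation, whitespace and
# similar separators are stripped before measuring, e.g.
#   result_content_len('Hello, world!') == 10   # counts only 'Helloworld'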


def compare_urls(url_a, url_b):
    """Lazy comparison of two URLs.

    "www.example.com" and "example.com" are considered equal, as are
    "www.example.com/path/" and "www.example.com/path", and
    "https://www.example.com/" and "http://www.example.com/".

    Args:
        url_a (ParseResult): first URL
        url_b (ParseResult): second URL

    Returns:
        bool: True if url_a and url_b are considered equal
    """
    # ignore a leading 'www.' when comparing hosts
    if url_a.netloc.startswith('www.'):
        host_a = url_a.netloc.replace('www.', '', 1)
    else:
        host_a = url_a.netloc
    if url_b.netloc.startswith('www.'):
        host_b = url_b.netloc.replace('www.', '', 1)
    else:
        host_b = url_b.netloc

    if host_a != host_b or url_a.query != url_b.query or url_a.fragment != url_b.fragment:
        return False

    # remove a trailing '/' from the path if present
    path_a = url_a.path[:-1] if url_a.path.endswith('/') else url_a.path
    path_b = url_b.path[:-1] if url_b.path.endswith('/') else url_b.path

    return unquote(path_a) == unquote(path_b)
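
# Usage sketch (illustrative values): the scheme is deliberately ignored, so
#   compare_urls(urlparse('https://www.example.com/path/'),
#                urlparse('http://example.com/path'))
# returns True, while differing query strings or fragments return False.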


def merge_two_infoboxes(infobox1, infobox2):
    # get the weight of each engine (default to 1)
    if hasattr(engines[infobox1['engine']], 'weight'):
        weight1 = engines[infobox1['engine']].weight
    else:
        weight1 = 1

    if hasattr(engines[infobox2['engine']], 'weight'):
        weight2 = engines[infobox2['engine']].weight
    else:
        weight2 = 1

    # the heavier engine wins the 'engine' label; the engine sets are merged either way
    if weight2 > weight1:
        infobox1['engine'] = infobox2['engine']
    infobox1['engines'] |= infobox2['engines']

    if 'urls' in infobox2:
        urls1 = infobox1.get('urls', None)
        if urls1 is None:
            urls1 = []

        # append only URLs (and entities) not already present in infobox1
        for url2 in infobox2.get('urls', []):
            unique_url = True
            parsed_url2 = urlparse(url2.get('url', ''))
            entity_url2 = url2.get('entity')
            for url1 in urls1:
                if (entity_url2 is not None and url1.get('entity') == entity_url2) or compare_urls(
                    urlparse(url1.get('url', '')), parsed_url2
                ):
                    unique_url = False
                    break
            if unique_url:
                urls1.append(url2)

        infobox1['urls'] = urls1

    if 'img_src' in infobox2:
        img1 = infobox1.get('img_src', None)
        img2 = infobox2.get('img_src')
        # take infobox2's image if infobox1 has none, or if infobox2's engine weighs more
        if img1 is None or weight2 > weight1:
            infobox1['img_src'] = img2

    if 'attributes' in infobox2:
        attributes1 = infobox1.get('attributes')
        if attributes1 is None:
            infobox1['attributes'] = attributes1 = []

        # collect the labels and entities already present in infobox1
        attributeSet = set()
        for attribute in attributes1:
            label = attribute.get('label')
            if label not in attributeSet:
                attributeSet.add(label)
            entity = attribute.get('entity')
            if entity not in attributeSet:
                attributeSet.add(entity)

        for attribute in infobox2.get('attributes', []):
            if attribute.get('label') not in attributeSet and attribute.get('entity') not in attributeSet:
                attributes1.append(attribute)

    if 'content' in infobox2:
        content1 = infobox1.get('content', None)
        content2 = infobox2.get('content', '')
        if content1 is not None:
            # keep whichever content has the greater meaningful length
            if result_content_len(content2) > result_content_len(content1):
                infobox1['content'] = content2
        else:
            infobox1['content'] = content2
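
# Illustrative sketch (hypothetical data): merging
#   {'engine': 'wikidata', 'engines': {'wikidata'}, 'content': 'a longer text ...'}
# into
#   {'engine': 'wikipedia', 'engines': {'wikipedia'}, 'content': 'short'}
# keeps the longer content, unions the engine sets, and deduplicates any
# 'urls' entries via compare_urls().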


def result_score(result):
    weight = 1.0

    for result_engine in result['engines']:
        if hasattr(engines[result_engine], 'weight'):
            weight *= float(engines[result_engine].weight)

    occurrences = len(result['positions'])

    return sum((occurrences * weight) / position for position in result['positions'])
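
# Worked example (illustrative): a result found at positions [1, 2] by engines with a
# combined weight of 1.0 scores (2 * 1.0) / 1 + (2 * 1.0) / 2 = 3.0, so results that
# appear both often and early rank higher.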


class Timing(NamedTuple):
    engine: str
    total: float
    load: float


class UnresponsiveEngine(NamedTuple):
    engine: str
    error_type: str
    suspended: bool


class ResultContainer:
    """Collects the results of a search: merges duplicates, resolves infoboxes,
    then scores and orders the merged results."""

    __slots__ = (
        '_merged_results',
        'infoboxes',
        'suggestions',
        'answers',
        'corrections',
        '_number_of_results',
        '_closed',
        'paging',
        'unresponsive_engines',
        'timings',
        'redirect_url',
        'engine_data',
        'on_result',
        '_lock',
    )

    def __init__(self):
        super().__init__()
        self._merged_results = []
        self.infoboxes = []
        self.suggestions = set()
        self.answers = {}
        self.corrections = set()
        self._number_of_results = []
        self.engine_data = defaultdict(dict)
        self._closed = False
        self.paging = False
        self.unresponsive_engines: Set[UnresponsiveEngine] = set()
        self.timings: List[Timing] = []
        self.redirect_url = None
        self.on_result = lambda _: True
        self._lock = RLock()

    def extend(self, engine_name, results):
        if self._closed:
            return

        standard_result_count = 0
        error_msgs = set()
        for result in list(results):
            result['engine'] = engine_name
            if 'suggestion' in result and self.on_result(result):
                self.suggestions.add(result['suggestion'])
            elif 'answer' in result and self.on_result(result):
                self.answers[result['answer']] = result
            elif 'correction' in result and self.on_result(result):
                self.corrections.add(result['correction'])
            elif 'infobox' in result and self.on_result(result):
                self._merge_infobox(result)
            elif 'number_of_results' in result and self.on_result(result):
                self._number_of_results.append(result['number_of_results'])
            elif 'engine_data' in result and self.on_result(result):
                self.engine_data[engine_name][result['key']] = result['engine_data']
            elif 'url' in result:
                # standard result (url, title, content)
                if not self._is_valid_url_result(result, error_msgs):
                    continue
                # normalize the result
                self._normalize_url_result(result)
                # on_result calls searx.search.SearchWithPlugins._on_result,
                # which runs the plugins
                if not self.on_result(result):
                    continue
                self.__merge_url_result(result, standard_result_count + 1)
                standard_result_count += 1
            elif self.on_result(result):
                self.__merge_result_no_url(result, standard_result_count + 1)
                standard_result_count += 1

        if len(error_msgs) > 0:
            for msg in error_msgs:
                count_error(engine_name, 'some results are invalid: ' + msg, secondary=True)

        if engine_name in engines:
            histogram_observe(standard_result_count, 'engine', engine_name, 'result', 'count')

        if not self.paging and engine_name in engines and engines[engine_name].paging:
            self.paging = True
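
    # Usage sketch (hypothetical values): an engine hands extend() a list of dicts, e.g.
    #   container.extend('example_engine', [{'url': 'https://example.com', 'title': 'Example',
    #                                        'content': 'An example result.'}])
    # Dicts carrying a 'suggestion', 'answer', 'correction', 'infobox', 'number_of_results'
    # or 'engine_data' key are routed to the matching container instead of the merged results.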

    def _merge_infobox(self, infobox):
        add_infobox = True
        infobox_id = infobox.get('id', None)
        infobox['engines'] = set([infobox['engine']])
        if infobox_id is not None:
            parsed_url_infobox_id = urlparse(infobox_id)
            with self._lock:
                for existing_infobox in self.infoboxes:
                    if compare_urls(urlparse(existing_infobox.get('id', '')), parsed_url_infobox_id):
                        merge_two_infoboxes(existing_infobox, infobox)
                        add_infobox = False

        if add_infobox:
            self.infoboxes.append(infobox)

    def _is_valid_url_result(self, result, error_msgs):
        if 'url' in result:
            if not isinstance(result['url'], str):
                logger.debug('result: invalid URL: %s', str(result))
                error_msgs.add('invalid URL')
                return False

        if 'title' in result and not isinstance(result['title'], str):
            logger.debug('result: invalid title: %s', str(result))
            error_msgs.add('invalid title')
            return False

        if 'content' in result:
            if not isinstance(result['content'], str):
                logger.debug('result: invalid content: %s', str(result))
                error_msgs.add('invalid content')
                return False

        return True

    def _normalize_url_result(self, result):
        """Normalize the result: parse the URL, default the scheme and template,
        and clean up the content."""
        result['parsed_url'] = urlparse(result['url'])

        # if the result has no scheme, use http as default
        if not result['parsed_url'].scheme:
            result['parsed_url'] = result['parsed_url']._replace(scheme="http")
            result['url'] = result['parsed_url'].geturl()

        # avoid duplicate content between the content and title fields
        if result.get('content') == result.get('title'):
            del result['content']

        # make sure there is a template
        if 'template' not in result:
            result['template'] = 'default.html'

        # strip multiple spaces and carriage returns from content
        if result.get('content'):
            result['content'] = WHITESPACE_REGEX.sub(' ', result['content'])
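
    # For illustration: a result URL like 'example.com/page' is normalized to
    # 'http://example.com/page', and a missing 'template' key defaults to 'default.html'.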

    def __merge_url_result(self, result, position):
        result['engines'] = set([result['engine']])
        with self._lock:
            duplicated = self.__find_duplicated_http_result(result)
            if duplicated:
                self.__merge_duplicated_http_result(duplicated, result, position)
                return

            # if there is no duplicate found, append result
            result['positions'] = [position]
            self._merged_results.append(result)

    def __find_duplicated_http_result(self, result):
        result_template = result.get('template')
        for merged_result in self._merged_results:
            if 'parsed_url' not in merged_result:
                continue
            if compare_urls(result['parsed_url'], merged_result['parsed_url']) and result_template == merged_result.get(
                'template'
            ):
                if result_template != 'images.html':
                    # not an image: same template and same URL means it's a duplicate
                    return merged_result

                # for images, the same URL is only a duplicate
                # if the img_src is also the same
                if result.get('img_src', '') == merged_result.get('img_src', ''):
                    return merged_result
        return None

    def __merge_duplicated_http_result(self, duplicated, result, position):
        # keep the content with more meaningful text
        if result_content_len(result.get('content', '')) > result_content_len(duplicated.get('content', '')):
            duplicated['content'] = result['content']

        # merge all of the result's parameters that are missing from the duplicate
        for key in result.keys():
            if not duplicated.get(key):
                duplicated[key] = result.get(key)

        # add the new position
        duplicated['positions'].append(position)

        # add engine to list of result-engines
        duplicated['engines'].add(result['engine'])

        # prefer https over http if possible
        if duplicated['parsed_url'].scheme != 'https' and result['parsed_url'].scheme == 'https':
            duplicated['url'] = result['parsed_url'].geturl()
            duplicated['parsed_url'] = result['parsed_url']

    def __merge_result_no_url(self, result, position):
        result['engines'] = set([result['engine']])
        result['positions'] = [position]
        with self._lock:
            self._merged_results.append(result)

    def close(self):
        self._closed = True

        for result in self._merged_results:
            score = result_score(result)
            result['score'] = score

            # remove html markup and duplicated whitespace
            if result.get('content'):
                result['content'] = utils.html_to_text(result['content']).strip()
            if result.get('title'):
                result['title'] = ' '.join(utils.html_to_text(result['title']).strip().split())

            for result_engine in result['engines']:
                counter_add(score, 'engine', result_engine, 'score')

        results = sorted(self._merged_results, key=itemgetter('score'), reverse=True)

        # pass 2 : group results by category and template
        gresults = []
        categoryPositions = {}

        for res in results:
            # FIXME : handle more than one category per engine
            engine = engines[res['engine']]
            res['category'] = engine.categories[0] if len(engine.categories) > 0 else ''

            # FIXME : handle more than one category per engine
            category = (
                res['category']
                + ':'
                + res.get('template', '')
                + ':'
                + ('img_src' if 'img_src' in res or 'thumbnail' in res else '')
            )

            current = None if category not in categoryPositions else categoryPositions[category]

            # group with previous results of the same category,
            # if the group can accept more results and is not too far
            # from the current position
            if current is not None and (current['count'] > 0) and (len(gresults) - current['index'] < 20):
                # insert this result next to the previous results
                # that share its category
                index = current['index']
                gresults.insert(index, res)

                # update every index after the current one
                # (including the current one)
                for k in categoryPositions:
                    v = categoryPositions[k]['index']
                    if v >= index:
                        categoryPositions[k]['index'] = v + 1

                # update this category
                current['count'] -= 1

            else:
                # start a new group for this category
                gresults.append(res)

                # update categoryPositions
                categoryPositions[category] = {'index': len(gresults), 'count': 8}

        # update _merged_results
        self._merged_results = gresults
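
    # Illustrative note: the grouping pass keeps up to 8 results per category group,
    # as long as the insertion point is fewer than 20 slots behind the end of the list,
    # so e.g. image results from several engines end up adjacent instead of interleaved.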

    def get_ordered_results(self):
        if not self._closed:
            self.close()
        return self._merged_results

    def results_length(self):
        return len(self._merged_results)

    @property
    def number_of_results(self) -> int:
        """Return the average of the result counts reported by the engines, or
        zero if that average is smaller than the number of results actually
        collected."""
        resultnum_sum = sum(self._number_of_results)
        if not resultnum_sum or not self._number_of_results:
            return 0

        average = int(resultnum_sum / len(self._number_of_results))
        if average < self.results_length():
            average = 0
        return average
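
    # Worked example (illustrative): if two engines report 1000 and 2000 results, the
    # average is 1500; but if, say, 1600 results were actually merged, that average
    # would understate reality, so the property returns 0 instead.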

    def add_unresponsive_engine(self, engine_name: str, error_type: str, suspended: bool = False):
        if engines[engine_name].display_error_messages:
            self.unresponsive_engines.add(UnresponsiveEngine(engine_name, error_type, suspended))

    def add_timing(self, engine_name: str, engine_time: float, page_load_time: float):
        self.timings.append(Timing(engine_name, total=engine_time, load=page_load_time))

    def get_timings(self):
        return self.timings