search.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
import requests as requests_lib
import threading
import re
from itertools import izip_longest, chain
from operator import itemgetter
from Queue import Queue
from time import time
from urlparse import urlparse, unquote
from searx.engines import (
    categories, engines
)
from searx.languages import language_codes
from searx.utils import gen_useragent
from searx.query import Query


number_of_searches = 0
def search_request_wrapper(fn, url, engine_name, **kwargs):
    try:
        return fn(url, **kwargs)
    except Exception, e:
        # increase errors stats
        engines[engine_name].stats['errors'] += 1

        # print engine name and specific error message
        print('[E] Error with engine "{0}":\n\t{1}'.format(
            engine_name, str(e)))
        return
def threaded_requests(requests):
    timeout_limit = max(r[2]['timeout'] for r in requests)
    search_start = time()
    for fn, url, request_args, engine_name in requests:
        request_args['timeout'] = timeout_limit
        th = threading.Thread(
            target=search_request_wrapper,
            args=(fn, url, engine_name),
            kwargs=request_args,
            name='search_request',
        )
        th._engine_name = engine_name
        th.start()

    for th in threading.enumerate():
        if th.name == 'search_request':
            remaining_time = max(0.0, timeout_limit - (time() - search_start))
            th.join(remaining_time)
            if th.isAlive():
                print('engine timeout: {0}'.format(th._engine_name))
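
# Note: the join() loop above enforces one shared deadline for all engines.
# Each join() is given only the time left until search_start + timeout_limit,
# so the total wait never exceeds the slowest engine's configured timeout.
# Illustration: with timeout_limit=3.0s and 1.2s already elapsed, the first
# join() waits at most 1.8s, and later joins wait only for whatever remains.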
# get default request parameters
def default_request_params():
    return {
        'method': 'GET',
        'headers': {},
        'data': {},
        'url': '',
        'cookies': {},
        'verify': True
    }
# create a callback wrapper for the search engine results
def make_callback(engine_name, results_queue, callback, params):

    def process_callback(response, **kwargs):
        response.search_params = params

        timeout_overhead = 0.2  # seconds
        search_duration = time() - params['started']
        timeout_limit = engines[engine_name].timeout + timeout_overhead
        if search_duration > timeout_limit:
            engines[engine_name].stats['page_load_time'] += timeout_limit
            engines[engine_name].stats['errors'] += 1
            return

        # call the engine-specific response parser
        search_results = callback(response)

        # add results
        for result in search_results:
            result['engine'] = engine_name
        results_queue.put_nowait((engine_name, search_results))

        # update stats with current page-load-time
        engines[engine_name].stats['page_load_time'] += search_duration

    return process_callback
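
# process_callback is registered below as a `requests` response hook
# (hooks=dict(response=callback)), so it runs in the worker thread as soon
# as the HTTP response arrives; results only reach the main thread through
# the thread-safe results_queue.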
# return the meaningful length of the content for a result
def content_result_len(content):
    if isinstance(content, basestring):
        # '-' is escaped so the character class lists literal characters
        # instead of accidentally forming the range ')'..'_'
        content = re.sub('[,;:!?\./\\\\ ()\-_]', '', content)
        return len(content)
    else:
        return 0
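
# illustration: punctuation and whitespace do not count towards the length,
# e.g. content_result_len('foo, bar!') == len('foobar') == 6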
# score results and remove duplicates
def score_results(results):
    # calculate scoring parameters
    flat_res = filter(
        None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    engines_len = len(results)
    results = []

    # pass 1: deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        res['host'] = res['parsed_url'].netloc

        if res['host'].startswith('www.'):
            res['host'] = res['host'].replace('www.', '', 1)

        res['engines'] = [res['engine']]
        weight = 1.0

        # strip multiple spaces and carriage returns from content
        if res.get('content'):
            res['content'] = re.sub(' +', ' ',
                                    res['content'].strip().replace('\n', ''))

        # get weight of this engine if possible
        if hasattr(engines[res['engine']], 'weight'):
            weight = float(engines[res['engine']].weight)

        # calculate score for that engine
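        # results are interleaved per engine (izip_longest above), so rank i
        # decays the score roughly once per "round" of engines; e.g. with
        # 30 flattened results from 3 engines, the first result scores
        # int((30 - 0) / 3) * weight + 1 = 10 * weight + 1
        # and the last scores int(1 / 3) * weight + 1 = 1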
        score = int((flat_len - i) / engines_len) * weight + 1

        # check for duplicates
        duplicated = False
        for new_res in results:
            # remove / from the end of the url if required
            p1 = res['parsed_url'].path[:-1]\
                if res['parsed_url'].path.endswith('/')\
                else res['parsed_url'].path
            p2 = new_res['parsed_url'].path[:-1]\
                if new_res['parsed_url'].path.endswith('/')\
                else new_res['parsed_url'].path

            # check if that result is a duplicate
            if res['host'] == new_res['host'] and\
               unquote(p1) == unquote(p2) and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break

        # merge duplicates together
        if duplicated:
            # use the content with more text
            if content_result_len(res.get('content', '')) >\
                    content_result_len(duplicated.get('content', '')):
                duplicated['content'] = res['content']

            # increase result-score
            duplicated['score'] += score

            # add engine to list of result-engines
            duplicated['engines'].append(res['engine'])

            # use https if possible
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['url'] = res['parsed_url'].geturl()
                duplicated['parsed_url'] = res['parsed_url']

        # if no duplicate is found, append the result
        else:
            res['score'] = score
            results.append(res)

    results = sorted(results, key=itemgetter('score'), reverse=True)
    # pass 2: group results by category and template
    gresults = []
    categoryPositions = {}

    for i, res in enumerate(results):
        # FIXME: handle more than one category per engine
        category = (engines[res['engine']].categories[0] + ':')\
            if 'template' not in res\
            else res['template']

        current = None if category not in categoryPositions\
            else categoryPositions[category]

        # group with previous results using the same category
        # if the group can accept more results and is not too far
        # from the current position
        if current is not None and (current['count'] > 0)\
                and (len(gresults) - current['index'] < 20):
            # group with the previous results using
            # the same category as this one
            index = current['index']
            gresults.insert(index, res)

            # update every index after the current one
            # (including the current one)
            for k in categoryPositions:
                v = categoryPositions[k]['index']
                if v >= index:
                    categoryPositions[k]['index'] = v + 1

            # update this category
            current['count'] -= 1

        else:
            # start a new group for this category
            gresults.append(res)

            # update categoryPositions
            categoryPositions[category] = {'index': len(gresults), 'count': 8}

    return gresults
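
# Grouping note: each category keeps an insertion point and a budget of 8
# follow-up slots; a result joins its category's group only while the group
# sits less than 20 positions behind the end of the list, which keeps one
# category from pulling results arbitrarily far up the page.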
def merge_two_infoboxes(infobox1, infobox2):
    if 'urls' in infobox2:
        urls1 = infobox1.get('urls', None)
        if urls1 is None:
            urls1 = []
            infobox1['urls'] = urls1

        urlSet = set()
        for url in infobox1.get('urls', []):
            urlSet.add(url.get('url', None))

        for url in infobox2.get('urls', []):
            if url.get('url', None) not in urlSet:
                urls1.append(url)

    if 'attributes' in infobox2:
        attributes1 = infobox1.get('attributes', None)
        if attributes1 is None:
            attributes1 = []
            infobox1['attributes'] = attributes1

        attributeSet = set()
        for attribute in infobox1.get('attributes', []):
            if attribute.get('label', None) not in attributeSet:
                attributeSet.add(attribute.get('label', None))

        # only take over attributes whose label is not already present
        for attribute in infobox2.get('attributes', []):
            if attribute.get('label', None) not in attributeSet:
                attributes1.append(attribute)

    if 'content' in infobox2:
        content1 = infobox1.get('content', None)
        content2 = infobox2.get('content', '')
        if content1 is not None:
            if content_result_len(content2) > content_result_len(content1):
                infobox1['content'] = content2
        else:
            infobox1['content'] = content2
def merge_infoboxes(infoboxes):
    results = []
    infoboxes_id = {}
    for infobox in infoboxes:
        add_infobox = True
        infobox_id = infobox.get('id', None)
        if infobox_id is not None:
            existingIndex = infoboxes_id.get(infobox_id, None)
            if existingIndex is not None:
                merge_two_infoboxes(results[existingIndex], infobox)
                add_infobox = False

        if add_infobox:
            results.append(infobox)
            infoboxes_id[infobox_id] = len(results) - 1

    return results
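
# illustration: infoboxes sharing the same 'id' (typically a canonical URL
# or entity identifier supplied by the engine) collapse into one entry, so
# [{'id': 'a', ...}, {'id': 'b', ...}, {'id': 'a', ...}] yields two
# infoboxes, with the third merged into the first via merge_two_infoboxes()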
class Search(object):
    """Search information container"""

    def __init__(self, request):
        # init vars
        super(Search, self).__init__()
        self.query = None
        self.engines = []
        self.categories = []
        self.paging = False
        self.pageno = 1
        self.lang = 'all'

        # set blocked engines
        if request.cookies.get('blocked_engines'):
            self.blocked_engines = request.cookies['blocked_engines'].split(',')  # noqa
        else:
            self.blocked_engines = []

        self.results = []
        self.suggestions = []
        self.answers = []
        self.infoboxes = []
        self.request_data = {}

        # set specific language if set
        if request.cookies.get('language')\
           and request.cookies['language'] in (x[0] for x in language_codes):
            self.lang = request.cookies['language']

        # set request method
        if request.method == 'POST':
            self.request_data = request.form
        else:
            self.request_data = request.args

        # TODO better exceptions
        if not self.request_data.get('q'):
            raise Exception('noquery')

        # set page number
        pageno_param = self.request_data.get('pageno', '1')
        if not pageno_param.isdigit() or int(pageno_param) < 1:
            raise Exception('wrong pagenumber')

        self.pageno = int(pageno_param)

        # parse query, if tags are set, which change
        # the search engine or search-language
        query_obj = Query(self.request_data['q'], self.blocked_engines)
        query_obj.parse_query()

        # set query
        self.query = query_obj.getSearchQuery()

        # get last selected language in query, if possible
        # TODO support search with multiple languages
        if len(query_obj.languages):
            self.lang = query_obj.languages[-1]

        self.engines = query_obj.engines

        self.categories = []

        # if engines are calculated from the query,
        # set categories using that information
        if self.engines:
            self.categories = list(set(engine['category']
                                       for engine in self.engines))

        # otherwise, use the selected categories to
        # calculate which engines should be used
        else:
            # set used categories
            for pd_name, pd in self.request_data.items():
                if pd_name.startswith('category_'):
                    category = pd_name[9:]
                    # if category is not found in list, skip
                    if category not in categories:
                        continue

                    # add category to list
                    self.categories.append(category)

            # if no category is specified for this search,
            # use the user-defined default configuration
            # (stored in a cookie)
            if not self.categories:
                cookie_categories = request.cookies.get('categories', '')
                cookie_categories = cookie_categories.split(',')
                for ccateg in cookie_categories:
                    if ccateg in categories:
                        self.categories.append(ccateg)

            # if still no category is specified, use 'general'
            # as the default category
            if not self.categories:
                self.categories = ['general']

            # use all engines for this search that are
            # declared under the selected categories
            for categ in self.categories:
                self.engines.extend({'category': categ,
                                     'name': x.name}
                                    for x in categories[categ]
                                    if x.name not in self.blocked_engines)
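
        # Note (illustrative, not exhaustive): parse_query() strips query
        # modifiers before the string is sent to the engines; in searx these
        # are bang-style tokens such as '!<engine>' to select engines and
        # ':<lang>' to select a language, but the authoritative syntax
        # lives in searx/query.py.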
    # do search-request
    def search(self, request):
        global number_of_searches

        # init vars
        requests = []
        results_queue = Queue()
        results = {}
        suggestions = set()
        answers = set()
        infoboxes = []

        # increase number of searches
        number_of_searches += 1

        # set default useragent
        # user_agent = request.headers.get('User-Agent', '')
        user_agent = gen_useragent()

        # start search-request for all selected engines
        for selected_engine in self.engines:
            if selected_engine['name'] not in engines:
                continue

            engine = engines[selected_engine['name']]

            # if paging is not supported, skip
            if self.pageno > 1 and not engine.paging:
                continue

            # if a search-language is set and the engine does not
            # provide language-support, skip
            if self.lang != 'all' and not engine.language_support:
                continue

            # set default request parameters
            request_params = default_request_params()
            request_params['headers']['User-Agent'] = user_agent
            request_params['category'] = selected_engine['category']
            request_params['started'] = time()
            request_params['pageno'] = self.pageno
            request_params['language'] = self.lang

            # update request parameters depending on the
            # search-engine (contained in the engines folder)
            engine.request(self.query.encode('utf-8'), request_params)

            if request_params['url'] is None:
                # TODO add support of offline engines
                pass

            # create a callback wrapper for the search engine results
            callback = make_callback(
                selected_engine['name'],
                results_queue,
                engine.response,
                request_params)

            # create a dictionary which contains all
            # information about the request
            request_args = dict(
                headers=request_params['headers'],
                hooks=dict(response=callback),
                cookies=request_params['cookies'],
                timeout=engine.timeout,
                verify=request_params['verify']
            )

            # specific type of request (GET or POST)
            if request_params['method'] == 'GET':
                req = requests_lib.get
            else:
                req = requests_lib.post
                request_args['data'] = request_params['data']

            # ignore empty urls
            if not request_params['url']:
                continue

            # append request to list
            requests.append((req, request_params['url'],
                             request_args,
                             selected_engine['name']))

        if not requests:
            return results, suggestions, answers, infoboxes

        # send all search-requests
        threaded_requests(requests)
  396. # send all search-request
  397. threaded_requests(requests)
  398. while not results_queue.empty():
  399. engine_name, engine_results = results_queue.get_nowait()
  400. # TODO type checks
  401. [suggestions.add(x['suggestion'])
  402. for x in list(engine_results)
  403. if 'suggestion' in x
  404. and engine_results.remove(x) is None]
  405. [answers.add(x['answer'])
  406. for x in list(engine_results)
  407. if 'answer' in x
  408. and engine_results.remove(x) is None]
  409. infoboxes.extend(x for x in list(engine_results)
  410. if 'infobox' in x
  411. and engine_results.remove(x) is None)
  412. results[engine_name] = engine_results
  413. # update engine-specific stats
  414. for engine_name, engine_results in results.items():
  415. engines[engine_name].stats['search_count'] += 1
  416. engines[engine_name].stats['result_count'] += len(engine_results)
  417. # score results and remove duplications
  418. results = score_results(results)
  419. # merge infoboxes according to their ids
  420. infoboxes = merge_infoboxes(infoboxes)
  421. # update engine stats, using calculated score
  422. for result in results:
  423. for res_engine in result['engines']:
  424. engines[result['engine']]\
  425. .stats['score_count'] += result['score']
  426. # return results, suggestions, answers and infoboxes
  427. return results, suggestions, answers, infoboxes
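
# Minimal usage sketch (assumption: a Flask request object carrying the
# usual searx parameters such as 'q' and optional 'pageno'/'category_*',
# as wired up in searx's webapp.py):
#
#     search = Search(request)
#     results, suggestions, answers, infoboxes = search.search(request)
#
# `results` is the scored, deduplicated and category-grouped list produced
# by score_results(); the other three collect the non-result payloads
# returned by individual engines.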