'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see <http://www.gnu.org/licenses/>.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''

import grequests
import re
from itertools import izip_longest, chain
from datetime import datetime
from operator import itemgetter
from urlparse import urlparse, unquote

from searx.engines import (
    categories, engines, engine_shortcuts
)
from searx.languages import language_codes
from searx.utils import gen_useragent
from searx.query import Query

number_of_searches = 0


# get default request parameters
def default_request_params():
    return {
        'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}


# create a callback wrapper for the search engine results
def make_callback(engine_name,
                  results,
                  suggestions,
                  answers,
                  infoboxes,
                  callback,
                  params):

    # wrapper which is called when the response of the engine arrives
    def process_callback(response, **kwargs):
        cb_res = []
        response.search_params = params

        # parse the response with the engine-specific callback
        try:
            search_results = callback(response)
        except Exception as e:
            # increase error stats
            engines[engine_name].stats['errors'] += 1
            results[engine_name] = cb_res

            # print engine name and specific error message
            print '[E] Error with engine "{0}":\n\t{1}'.format(
                engine_name, str(e))
            return

        # add results
        for result in search_results:
            result['engine'] = engine_name

            # if it is a suggestion, add it to the list of suggestions
            if 'suggestion' in result:
                # TODO type checks
                suggestions.add(result['suggestion'])
                continue

            # if it is an answer, add it to the list of answers
            if 'answer' in result:
                answers.add(result['answer'])
                continue

            # if it is an infobox, add it to the list of infoboxes
            if 'infobox' in result:
                infoboxes.append(result)
                continue

            # append result
            cb_res.append(result)

        results[engine_name] = cb_res

        # update stats with current page-load-time
        engines[engine_name].stats['page_load_time'] += \
            (datetime.now() - params['started']).total_seconds()

    return process_callback
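

# Note: judging from the dispatch in process_callback above, engine
# callbacks are expected to return an iterable of plain dicts: ordinary
# results carry keys such as 'url', 'title' and 'content', while special
# items carry one of 'suggestion', 'answer' or 'infobox'.
# Hypothetical examples:
#
#     {'url': 'https://example.org/', 'title': 'Example', 'content': '...'}
#     {'suggestion': 'an alternative query'}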


# return the meaningful length of the content for a result
def content_result_len(content):
    if isinstance(content, basestring):
        # strip punctuation and whitespace before counting; the '-' is
        # placed last so it matches a literal dash instead of the
        # unintended ')'-to-'_' character range
        content = re.sub('[,;:!?\./\\\\ ()_-]', '', content)
        return len(content)
    else:
        return 0
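
# e.g. content_result_len('Lorem ipsum, dolor sit!') strips the comma,
# the spaces and the '!', giving len('Loremipsumdolorsit') == 18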


# score results and remove duplications
def score_results(results):
    # calculate scoring parameters
    flat_res = filter(
        None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    engines_len = len(results)

    results = []

    # pass 1: deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        res['host'] = res['parsed_url'].netloc

        if res['host'].startswith('www.'):
            res['host'] = res['host'].replace('www.', '', 1)

        res['engines'] = [res['engine']]
        weight = 1.0

        # strip multiple spaces and carriage returns from content
        if 'content' in res:
            res['content'] = re.sub(' +', ' ',
                                    res['content'].strip().replace('\n', ''))

        # get weight of this engine if possible
        if hasattr(engines[res['engine']], 'weight'):
            weight = float(engines[res['engine']].weight)

        # calculate score for that engine
        score = int((flat_len - i) / engines_len) * weight + 1
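        # e.g. with 3 engines and 30 interleaved results (hypothetical
        # numbers), the first result scores int(30 / 3) * 1.0 + 1 = 11.0
        # and the last one int(1 / 3) * 1.0 + 1 = 1.0, with weight
        # defaulting to 1.0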
        # check for duplicates
        duplicated = False
        for new_res in results:
            # remove / from the end of the url if required
            p1 = res['parsed_url'].path[:-1]\
                if res['parsed_url'].path.endswith('/')\
                else res['parsed_url'].path
            p2 = new_res['parsed_url'].path[:-1]\
                if new_res['parsed_url'].path.endswith('/')\
                else new_res['parsed_url'].path

            # check if that result is a duplicate
            if res['host'] == new_res['host'] and\
               unquote(p1) == unquote(p2) and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break

        # merge duplicates together
        if duplicated:
            # use the content with more text
            if content_result_len(res.get('content', '')) >\
                    content_result_len(duplicated.get('content', '')):
                duplicated['content'] = res['content']

            # increase result-score
            duplicated['score'] += score

            # add engine to list of result-engines
            duplicated['engines'].append(res['engine'])

            # use https, if possible
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['url'] = res['parsed_url'].geturl()
                duplicated['parsed_url'] = res['parsed_url']

        # if there is no duplicate found, append result
        else:
            res['score'] = score
            results.append(res)

    results = sorted(results, key=itemgetter('score'), reverse=True)

    # pass 2: group results by category and template
    gresults = []
    categoryPositions = {}

    for i, res in enumerate(results):
        # FIXME: handle more than one category per engine
        category = engines[res['engine']].categories[0] + ':' + (
            '' if 'template' not in res else res['template'])

        current = None if category not in categoryPositions\
            else categoryPositions[category]

        # group with previous results using the same category, if the
        # group can accept more results and is not too far from the
        # current position
        if current is not None and (current['count'] > 0)\
                and (len(gresults) - current['index'] < 20):
            # group with the previous results using
            # the same category with this one
            index = current['index']
            gresults.insert(index, res)

            # update every index after the current one
            # (including the current one)
            for k in categoryPositions:
                v = categoryPositions[k]['index']
                if v >= index:
                    categoryPositions[k]['index'] = v + 1

            # update this category
            current['count'] -= 1

        else:
            # start a new group for this category
            gresults.append(res)

            # update categoryIndex: allow up to 8 more results
            # to be grouped under this position
            categoryPositions[category] = {'index': len(gresults),
                                           'count': 8}

    return gresults
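

# merge infobox2 into infobox1 in place: urls and attributes from
# infobox2 are appended unless already present, and the longer
# 'content' string wins (measured with content_result_len above)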
def merge_two_infoboxes(infobox1, infobox2):
    if 'urls' in infobox2:
        urls1 = infobox1.get('urls', None)
        if urls1 is None:
            urls1 = []
            infobox1['urls'] = urls1

        urlSet = set()
        for url in infobox1.get('urls', []):
            urlSet.add(url.get('url', None))

        for url in infobox2.get('urls', []):
            if url.get('url', None) not in urlSet:
                urls1.append(url)

    if 'attributes' in infobox2:
        attributes1 = infobox1.get('attributes', None)
        if attributes1 is None:
            attributes1 = []
            infobox1['attributes'] = attributes1

        attributeSet = set()
        for attribute in infobox1.get('attributes', []):
            if attribute.get('label', None) not in attributeSet:
                attributeSet.add(attribute.get('label', None))

        for attribute in infobox2.get('attributes', []):
            # only append attributes whose label is not already present
            if attribute.get('label', None) not in attributeSet:
                attributes1.append(attribute)

    if 'content' in infobox2:
        content1 = infobox1.get('content', None)
        content2 = infobox2.get('content', '')
        if content1 is not None:
            if content_result_len(content2) > content_result_len(content1):
                infobox1['content'] = content2
        else:
            infobox1['content'] = content2
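

# collapse infoboxes which share the same 'id' into a single entry;
# e.g. [{'id': 'Q42'}, {'id': 'Q42'}, {'id': 'Q1'}] (hypothetical
# Wikidata-style ids) collapses to two infoboxes, with both 'Q42'
# boxes merged into the first one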
def merge_infoboxes(infoboxes):
    results = []
    infoboxes_id = {}
    for infobox in infoboxes:
        add_infobox = True
        infobox_id = infobox.get('id', None)
        if infobox_id is not None:
            existingIndex = infoboxes_id.get(infobox_id, None)
            if existingIndex is not None:
                merge_two_infoboxes(results[existingIndex], infobox)
                add_infobox = False

        if add_infobox:
            results.append(infobox)
            infoboxes_id[infobox_id] = len(results) - 1

    return results


class Search(object):

    """Search information container"""

    def __init__(self, request):
        # init vars
        super(Search, self).__init__()
        self.query = None
        self.engines = []
        self.categories = []
        self.paging = False
        self.pageno = 1
        self.lang = 'all'

        # set blocked engines
        if request.cookies.get('blocked_engines'):
            self.blocked_engines = request.cookies['blocked_engines'].split(',')  # noqa
        else:
            self.blocked_engines = []

        self.results = []
        self.suggestions = []
        self.answers = []
        self.infoboxes = []
        self.request_data = {}

        # set specific language if set
        if request.cookies.get('language')\
           and request.cookies['language'] in (x[0] for x in language_codes):
            self.lang = request.cookies['language']

        # set request method
        if request.method == 'POST':
            self.request_data = request.form
        else:
            self.request_data = request.args

        # TODO better exceptions
        if not self.request_data.get('q'):
            raise Exception('noquery')

        # set page number
        pageno_param = self.request_data.get('pageno', '1')
        if not pageno_param.isdigit() or int(pageno_param) < 1:
            raise Exception('wrong pagenumber')

        self.pageno = int(pageno_param)

        # parse query, if tags are set, which change
        # the search engine or the search-language
        query_obj = Query(self.request_data['q'], self.blocked_engines)
        query_obj.parse_query()

        # set query
        self.query = query_obj.getSearchQuery()

        # get last selected language in query, if possible
        # TODO support search with multiple languages
        if len(query_obj.languages):
            self.lang = query_obj.languages[-1]

        self.engines = query_obj.engines

        self.categories = []

        # if engines are calculated from the query,
        # set categories by using that information
        if self.engines:
            self.categories = list(set(engine['category']
                                       for engine in self.engines))

        # otherwise, use the selected categories
        # to calculate which engines should be used
        else:
            # set used categories
            for pd_name, pd in self.request_data.items():
                if pd_name.startswith('category_'):
                    category = pd_name[9:]
                    # if category is not found in list, skip
                    if category not in categories:
                        continue

                    # add category to list
                    self.categories.append(category)

            # if no category is specified for this search,
            # use the user-defined default configuration
            # (which is stored in a cookie)
            if not self.categories:
                cookie_categories = request.cookies.get('categories', '')
                cookie_categories = cookie_categories.split(',')
                for ccateg in cookie_categories:
                    if ccateg in categories:
                        self.categories.append(ccateg)

            # if still no category is specified,
            # use 'general' as the default category
            if not self.categories:
                self.categories = ['general']

            # use all engines which are declared
            # under the selected categories
            for categ in self.categories:
                self.engines.extend({'category': categ,
                                     'name': x.name}
                                    for x in categories[categ]
                                    if x.name not in self.blocked_engines)
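
    # The query syntax is handled by searx.query.Query in __init__
    # (sketch, not defined in this file): '!'-prefixed tokens select
    # engines or categories (via the imported engine_shortcuts,
    # e.g. '!ddg'), ':'-prefixed tokens select the search language
    # (e.g. ':de'); the remainder becomes the plain search query.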

    # do search-request
    def search(self, request):
        global number_of_searches

        # init vars
        requests = []
        results = {}
        suggestions = set()
        answers = set()
        infoboxes = []

        # increase number of searches
        number_of_searches += 1

        # set default useragent
        # user_agent = request.headers.get('User-Agent', '')
        user_agent = gen_useragent()

        # start search-request for all selected engines
        for selected_engine in self.engines:
            if selected_engine['name'] not in engines:
                continue

            engine = engines[selected_engine['name']]

            # if paging is not supported, skip
            if self.pageno > 1 and not engine.paging:
                continue

            # if search-language is set and engine does not
            # provide language-support, skip
            if self.lang != 'all' and not engine.language_support:
                continue

            # set default request parameters
            request_params = default_request_params()
            request_params['headers']['User-Agent'] = user_agent
            request_params['category'] = selected_engine['category']
            request_params['started'] = datetime.now()
            request_params['pageno'] = self.pageno
            request_params['language'] = self.lang

            # update request parameters dependent on
            # search-engine (contained in engines folder)
            request_params = engine.request(self.query.encode('utf-8'),
                                            request_params)

            if request_params['url'] is None:
                # TODO add support of offline engines
                pass

            # create a callback wrapper for the search engine results
            callback = make_callback(
                selected_engine['name'],
                results,
                suggestions,
                answers,
                infoboxes,
                engine.response,
                request_params
            )

            # create dictionary which contains all
            # information about the request
            request_args = dict(
                headers=request_params['headers'],
                hooks=dict(response=callback),
                cookies=request_params['cookies'],
                timeout=engine.timeout
            )

            # specific type of request (GET or POST)
            if request_params['method'] == 'GET':
                req = grequests.get
            else:
                req = grequests.post
                request_args['data'] = request_params['data']

            # ignore empty urls
            if not request_params['url']:
                continue

            # append request to list
            requests.append(req(request_params['url'], **request_args))

        # send all search-requests
        grequests.map(requests)

        # update engine-specific stats
        for engine_name, engine_results in results.items():
            engines[engine_name].stats['search_count'] += 1
            engines[engine_name].stats['result_count'] += len(engine_results)

        # score results and remove duplications
        results = score_results(results)

        # merge infoboxes according to their ids
        infoboxes = merge_infoboxes(infoboxes)

        # update engine stats, using the calculated score;
        # credit every engine that contributed to a merged result
        for result in results:
            for res_engine in result['engines']:
                engines[res_engine]\
                    .stats['score_count'] += result['score']

        # return results, suggestions, answers and infoboxes
        return results, suggestions, answers, infoboxes
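

# Example usage from a Flask view (sketch; assumes `request` is the
# flask.request object, as in searx's webapp.py):
#
#     search = Search(request)
#     results, suggestions, answers, infoboxes = search.search(request)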