search.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
import grequests
import re

from itertools import izip_longest, chain
from datetime import datetime
from operator import itemgetter
from urlparse import urlparse, unquote

from searx.engines import (
    categories, engines, engine_shortcuts
)
from searx.languages import language_codes
from searx.utils import gen_useragent

number_of_searches = 0

# get the default request parameters
def default_request_params():
    return {
        'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}

# create a callback wrapper for the search engine results
def make_callback(engine_name, results, suggestions, callback, params):

    # the wrapper collects the results of a single engine response
    def process_callback(response, **kwargs):
        cb_res = []
        response.search_params = params

        # update stats with the current page-load-time
        engines[engine_name].stats['page_load_time'] += \
            (datetime.now() - params['started']).total_seconds()

        try:
            search_results = callback(response)
        except Exception as e:
            # increase error stats
            engines[engine_name].stats['errors'] += 1
            results[engine_name] = cb_res

            # print engine name and specific error message
            print '[E] Error with engine "{0}":\n\t{1}'.format(
                engine_name, str(e))
            return

        for result in search_results:
            result['engine'] = engine_name

            # if it is a suggestion, add it to the list of suggestions
            if 'suggestion' in result:
                # TODO type checks
                suggestions.add(result['suggestion'])
                continue

            # append result
            cb_res.append(result)

        results[engine_name] = cb_res

    return process_callback
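
# note: the returned wrapper is attached as a grequests response hook in
# Search.search below, roughly like this:
#   cb = make_callback(name, results, suggestions, engine.response, params)
#   grequests.get(url, hooks=dict(response=cb))
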
# return the meaningful length of the content for a result
def content_result_len(result):
    if isinstance(result.get('content'), basestring):
        # strip punctuation and whitespace before counting
        # ('-' is escaped so that ')-_' is not treated as a character range)
        content = re.sub('[,;:!?\./\\\\ ()\-_]', '', result.get('content'))
        return len(content)
    else:
        return 0
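
# e.g. content_result_len({'content': 'a, b c!'}) == 3
# ('abc' remains after stripping punctuation and spaces)
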
# score results and remove duplicates
def score_results(results):
    # calculate scoring parameters
    flat_res = filter(
        None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    engines_len = len(results)

    results = []

    # pass 1: deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        res['host'] = res['parsed_url'].netloc

        if res['host'].startswith('www.'):
            res['host'] = res['host'].replace('www.', '', 1)

        res['engines'] = [res['engine']]
        weight = 1.0

        # strip multiple spaces and carriage returns from content
        if 'content' in res:
            res['content'] = re.sub(
                ' +', ' ', res['content'].strip().replace('\n', ''))

        # get the weight of this engine if possible
        if hasattr(engines[res['engine']], 'weight'):
            weight = float(engines[res['engine']].weight)

        # calculate the score for this result:
        # earlier positions and heavier engines score higher
        score = int((flat_len - i) / engines_len) * weight + 1
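        # worked example (assuming 3 engines with 10 results each, so
        # flat_len == 30 and engines_len == 3): the first result (i == 0,
        # weight 1.0) scores int(30 / 3) * 1.0 + 1 == 11, while the last
        # (i == 29) scores int(1 / 3) * 1.0 + 1 == 1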
        # check for duplicates
        duplicated = False
        for new_res in results:
            # remove / from the end of the url if required
            p1 = res['parsed_url'].path[:-1]\
                if res['parsed_url'].path.endswith('/')\
                else res['parsed_url'].path
            p2 = new_res['parsed_url'].path[:-1]\
                if new_res['parsed_url'].path.endswith('/')\
                else new_res['parsed_url'].path

            # check if this result is a duplicate
            if res['host'] == new_res['host'] and\
               unquote(p1) == unquote(p2) and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break

        # merge duplicates together
        if duplicated:
            # use the content with more text
            if content_result_len(res) > content_result_len(duplicated):
                duplicated['content'] = res['content']

            # increase result-score
            duplicated['score'] += score

            # add engine to the list of result-engines
            duplicated['engines'].append(res['engine'])

            # use https if possible
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['url'] = res['parsed_url'].geturl()
                duplicated['parsed_url'] = res['parsed_url']

        # if no duplicate was found, append the result
        else:
            res['score'] = score
            results.append(res)

    results = sorted(results, key=itemgetter('score'), reverse=True)
    # pass 2: group results by category and template
    gresults = []
    categoryPositions = {}

    for i, res in enumerate(results):
        # FIXME: handle more than one category per engine
        category = engines[res['engine']].categories[0] + ':'\
            + ('' if 'template' not in res else res['template'])
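        # e.g. an engine in the 'general' category yields the grouping key
        # 'general:' when the result has no template, or
        # 'general:videos.html' for a (hypothetical) template of that name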
        current = None if category not in categoryPositions\
            else categoryPositions[category]

        # group with previous results using the same category if the group
        # can accept more results and is not too far from the current position
        if current is not None and (current['count'] > 0)\
                and (len(gresults) - current['index'] < 20):
            # group this result with the previous results of the same category
            index = current['index']
            gresults.insert(index, res)

            # update every index after the current one
            # (including the current one)
            for k in categoryPositions:
                v = categoryPositions[k]['index']
                if v >= index:
                    categoryPositions[k]['index'] = v + 1

            # update this category
            current['count'] -= 1

        else:
            # start a new group for this category
            gresults.append(res)

            # update categoryPositions
            categoryPositions[category] = {'index': len(gresults), 'count': 8}

    return gresults

class Search(object):

    """Search information container"""

    def __init__(self, request):
        # init vars
        super(Search, self).__init__()
        self.query = None
        self.engines = []
        self.categories = []
        self.paging = False
        self.pageno = 1
        self.lang = 'all'

        # set blocked engines
        if request.cookies.get('blocked_engines'):
            self.blocked_engines = request.cookies['blocked_engines'].split(',')  # noqa
        else:
            self.blocked_engines = []

        self.results = []
        self.suggestions = []
        self.request_data = {}

        # set specific language if set
        if request.cookies.get('language')\
           and request.cookies['language'] in (x[0] for x in language_codes):
            self.lang = request.cookies['language']

        # set request method
        if request.method == 'POST':
            self.request_data = request.form
        else:
            self.request_data = request.args

        # TODO better exceptions
        if not self.request_data.get('q'):
            raise Exception('noquery')

        # set query
        self.query = self.request_data['q']

        # set page number
        pageno_param = self.request_data.get('pageno', '1')
        if not pageno_param.isdigit() or int(pageno_param) < 1:
            raise Exception('wrong pagenumber')

        self.pageno = int(pageno_param)

        # parse the query for tags which change
        # the search engine or the search language
        self.parse_query()

        self.categories = []

        # if engines are calculated from the query,
        # set categories based on that information
        if self.engines:
            self.categories = list(set(engine['category']
                                       for engine in self.engines))

        # otherwise, use the selected categories to
        # calculate which engines should be used
        else:
            # set used categories
            for pd_name, pd in self.request_data.items():
                if pd_name.startswith('category_'):
                    category = pd_name[9:]

                    # if the category is not found in the list, skip it
                    if category not in categories:
                        continue

                    # add category to list
                    self.categories.append(category)
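                    # e.g. a (hypothetical) form field named
                    # 'category_images' selects the 'images' category here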
            # if no category is specified for this search, use the
            # user-defined default configuration stored in the cookie
            if not self.categories:
                cookie_categories = request.cookies.get('categories', '')
                cookie_categories = cookie_categories.split(',')
                for ccateg in cookie_categories:
                    if ccateg in categories:
                        self.categories.append(ccateg)

            # if still no category is specified, use 'general' as the default
            if not self.categories:
                self.categories = ['general']

            # use all engines which are declared under the selected categories
            for categ in self.categories:
                self.engines.extend({'category': categ,
                                     'name': x.name}
                                    for x in categories[categ]
                                    if x.name not in self.blocked_engines)
    # parse the query for tags which change
    # the search engine or the search language
    def parse_query(self):
        query_parts = self.query.split()
        modified = False

        # check if a language prefix is set
        if query_parts[0].startswith(':'):
            lang = query_parts[0][1:].lower()

            # check if it matches any of the declared language-codes
            for lc in language_codes:
                lang_id, lang_name, country = map(str.lower, lc)

                # if a matching language-code is found,
                # set it as the new search language
                if lang == lang_id\
                   or lang_id.startswith(lang)\
                   or lang == lang_name\
                   or lang == country:
                    self.lang = lang
                    modified = True
                    break

        # check if a category/engine prefix is set
        elif query_parts[0].startswith('!'):
            prefix = query_parts[0][1:].replace('_', ' ')

            # check if the prefix is equal to an engine shortcut
            if prefix in engine_shortcuts\
               and engine_shortcuts[prefix] not in self.blocked_engines:
                modified = True
                self.engines.append({'category': 'none',
                                     'name': engine_shortcuts[prefix]})

            # check if the prefix is equal to an engine name
            elif prefix in engines\
                    and prefix not in self.blocked_engines:
                modified = True
                self.engines.append({'category': 'none',
                                     'name': prefix})

            # check if the prefix is equal to a category name
            elif prefix in categories:
                modified = True
                # use all engines declared under that category name
                self.engines.extend({'category': prefix,
                                     'name': engine.name}
                                    for engine in categories[prefix]
                                    if engine.name not in self.blocked_engines)

        # if a language, category or engine was specified in this query,
        # strip the tag and search the remaining query for more tags
        if modified:
            self.query = self.query.replace(query_parts[0], '', 1).strip()
            self.parse_query()
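
    # example (sketch): given the query ':de !ddg foo', parse_query first
    # consumes ':de' (setting the search language), then '!ddg' (assuming
    # such an engine shortcut is configured), leaving 'foo' as the query
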
    # do search-request
    def search(self, request):
        global number_of_searches

        # init vars
        requests = []
        results = {}
        suggestions = set()

        # increase number of searches
        number_of_searches += 1

        # set default useragent
        # user_agent = request.headers.get('User-Agent', '')
        user_agent = gen_useragent()

        # start a search-request for all selected engines
        for selected_engine in self.engines:
            if selected_engine['name'] not in engines:
                continue

            engine = engines[selected_engine['name']]

            # if paging is not supported, skip
            if self.pageno > 1 and not engine.paging:
                continue

            # if a search-language is set and the engine
            # does not provide language-support, skip
            if self.lang != 'all' and not engine.language_support:
                continue

            # set default request parameters
            request_params = default_request_params()
            request_params['headers']['User-Agent'] = user_agent
            request_params['category'] = selected_engine['category']
            request_params['started'] = datetime.now()
            request_params['pageno'] = self.pageno
            request_params['language'] = self.lang

            # update request parameters depending on the search-engine
            # (contained in the engines folder)
            request_params = engine.request(self.query.encode('utf-8'),
                                            request_params)

            if request_params['url'] is None:
                # TODO add support of offline engines
                pass

            # create a callback wrapper for the search engine results
            callback = make_callback(
                selected_engine['name'],
                results,
                suggestions,
                engine.response,
                request_params
            )

            # create a dictionary which contains
            # all information about the request
            request_args = dict(
                headers=request_params['headers'],
                hooks=dict(response=callback),
                cookies=request_params['cookies'],
                timeout=engine.timeout
            )

            # specific type of request (GET or POST)
            if request_params['method'] == 'GET':
                req = grequests.get
            else:
                req = grequests.post
                request_args['data'] = request_params['data']

            # ignore empty urls
            if not request_params['url']:
                continue

            # append request to list
            requests.append(req(request_params['url'], **request_args))

        # send all search-requests
        grequests.map(requests)

        # update engine-specific stats
        for engine_name, engine_results in results.items():
            engines[engine_name].stats['search_count'] += 1
            engines[engine_name].stats['result_count'] += len(engine_results)

        # score results and remove duplicates
        results = score_results(results)

        # update engine stats using the calculated scores; credit the score
        # to every engine which returned the (possibly merged) result
        for result in results:
            for res_engine in result['engines']:
                engines[res_engine].stats['score_count'] += result['score']

        # return results and suggestions
        return results, suggestions
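
# usage sketch (an assumption for illustration: searx's Flask webapp drives
# this module roughly as follows; the route and view name are illustrative):
#
#   from searx.search import Search
#
#   @app.route('/', methods=['GET', 'POST'])
#   def index():
#       search = Search(request)  # parses query, categories and paging
#       results, suggestions = search.search(request)
#       ...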