__init__.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
from os.path import realpath, dirname, splitext, join
from imp import load_source
import grequests
from itertools import izip_longest, chain
from operator import itemgetter
from urlparse import urlparse
from searx import settings
from searx.utils import gen_useragent
import sys
from datetime import datetime
from flask.ext.babel import gettext

engine_dir = dirname(realpath(__file__))

number_of_searches = 0

engines = {}

categories = {'general': []}

engine_shortcuts = {}


def load_module(filename):
    modname = splitext(filename)[0]
    if modname in sys.modules:
        del sys.modules[modname]
    filepath = join(engine_dir, filename)
    module = load_source(modname, filepath)
    module.name = modname
    return module
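
# Illustrative usage of load_module() (the engine filename here is
# hypothetical; real names come from settings.yml):
#
#   engine = load_module('duckduckgo.py')
#   engine.name     # -> 'duckduckgo'
#   engine.request  # the module's request() function, used in search()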


if 'engines' not in settings or not settings['engines']:
    print '[E] Error: no engines found. Edit your settings.yml'
    exit(2)

for engine_data in settings['engines']:
    engine_name = engine_data['engine']
    engine = load_module(engine_name + '.py')
    for param_name in engine_data:
        if param_name == 'engine':
            continue
        if param_name == 'categories':
            if engine_data['categories'] == 'none':
                engine.categories = []
            else:
                engine.categories = map(
                    str.strip, engine_data['categories'].split(','))
            continue
        setattr(engine, param_name, engine_data[param_name])
    if not hasattr(engine, 'paging'):
        engine.paging = False
    if not hasattr(engine, 'language_support'):
        engine.language_support = True
    if not hasattr(engine, 'shortcut'):
        engine.shortcut = ''
    # checking required variables
    for engine_attr in dir(engine):
        if engine_attr.startswith('_'):
            continue
        if getattr(engine, engine_attr) is None:
            print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr)  # noqa
            sys.exit(1)

    engines[engine.name] = engine

    engine.stats = {
        'result_count': 0,
        'search_count': 0,
        'page_load_time': 0,
        'score_count': 0,
        'errors': 0
    }

    if hasattr(engine, 'categories'):
        for category_name in engine.categories:
            categories.setdefault(category_name, []).append(engine)
    else:
        categories['general'].append(engine)

    if engine.shortcut:
        # TODO check duplications
        engine_shortcuts[engine.shortcut] = engine.name
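
# For reference, a minimal settings.yml entry driving the loop above could
# look like this (engine name, category and shortcut are illustrative):
#
#   engines:
#     - engine: duckduckgo
#       categories: general
#       shortcut: ddg
#
# Every key other than 'engine' and 'categories' is copied onto the engine
# module verbatim via setattr().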


def default_request_params():
    return {
        'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
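
# Each engine module is expected to fill in these params and to parse its
# own response. A minimal sketch of that contract (not a real engine):
#
#   def request(query, params):
#       params['url'] = 'https://example.com/?q=' + query  # placeholder
#       return params
#
#   def response(resp):
#       # returns a list of result dicts; 'suggestion' entries are
#       # collected separately by make_callback() below
#       return [{'url': '...', 'title': '...', 'content': '...'}]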


def make_callback(engine_name, results, suggestions, callback, params):
    # creating a callback wrapper for the search engine results
    def process_callback(response, **kwargs):
        cb_res = []
        response.search_params = params
        engines[engine_name].stats['page_load_time'] += \
            (datetime.now() - params['started']).total_seconds()
        try:
            search_results = callback(response)
        except Exception, e:
            engines[engine_name].stats['errors'] += 1
            results[engine_name] = cb_res
            print '[E] Error with engine "{0}":\n\t{1}'.format(
                engine_name, str(e))
            return
        for result in search_results:
            result['engine'] = engine_name
            if 'suggestion' in result:
                # TODO type checks
                suggestions.add(result['suggestion'])
                continue
            cb_res.append(result)
        results[engine_name] = cb_res
    return process_callback
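
# process_callback() is attached as a grequests response hook in search()
# below, so each engine's parser runs as soon as its own HTTP response
# arrives and stores its results in the shared `results` dict keyed by
# engine name.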


def score_results(results):
    # interleave the per-engine result lists into one flat list
    flat_res = filter(
        None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    engines_len = len(results)
    results = []

    # deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        res['engines'] = [res['engine']]
        weight = 1.0
        if hasattr(engines[res['engine']], 'weight'):
            weight = float(engines[res['engine']].weight)
        score = int((flat_len - i) / engines_len) * weight + 1
        duplicated = False
        for new_res in results:
            # compare urls with any trailing slash stripped from the path
            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path  # noqa
            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path  # noqa
            if res['parsed_url'].netloc == new_res['parsed_url'].netloc and\
               p1 == p2 and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break
        if duplicated:
            # merge: keep the longer content snippet, sum the scores
            if len(res.get('content', '')) > len(duplicated.get('content', '')):  # noqa
                duplicated['content'] = res['content']
            duplicated['score'] += score
            duplicated['engines'].append(res['engine'])
            # prefer the https version of the url
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['url'] = res['parsed_url'].geturl()
                duplicated['parsed_url'] = res['parsed_url']
        else:
            res['score'] = score
            results.append(res)
    return sorted(results, key=itemgetter('score'), reverse=True)
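
# Worked example of the scoring formula above: with 2 engines returning 10
# interleaved results each (flat_len=20, engines_len=2) and weight 1.0, the
# first result scores int((20 - 0) / 2) * 1.0 + 1 = 11.0 and the last one
# int((20 - 19) / 2) * 1.0 + 1 = 1.0, so higher-ranked results win unless
# duplicates accumulate score across engines.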


def search(query, request, selected_engines, pageno=1, lang='all'):
    global engines, categories, number_of_searches
    requests = []
    results = {}
    suggestions = set()
    number_of_searches += 1
    # send a generated user agent instead of the client's real one
    user_agent = gen_useragent()

    for selected_engine in selected_engines:
        if selected_engine['name'] not in engines:
            continue

        engine = engines[selected_engine['name']]

        if pageno > 1 and not engine.paging:
            continue

        if lang != 'all' and not engine.language_support:
            continue

        request_params = default_request_params()
        request_params['headers']['User-Agent'] = user_agent
        request_params['category'] = selected_engine['category']
        request_params['started'] = datetime.now()
        request_params['pageno'] = pageno
        request_params['language'] = lang
        request_params = engine.request(query, request_params)

        callback = make_callback(
            selected_engine['name'],
            results,
            suggestions,
            engine.response,
            request_params
        )

        request_args = dict(
            headers=request_params['headers'],
            hooks=dict(response=callback),
            cookies=request_params['cookies'],
            timeout=settings['server']['request_timeout']
        )

        if request_params['method'] == 'GET':
            req = grequests.get
        else:
            req = grequests.post
            request_args['data'] = request_params['data']

        # ignoring empty urls
        if not request_params['url']:
            continue

        requests.append(req(request_params['url'], **request_args))

    grequests.map(requests)

    for engine_name, engine_results in results.items():
        engines[engine_name].stats['search_count'] += 1
        engines[engine_name].stats['result_count'] += len(engine_results)

    results = score_results(results)
    for result in results:
        for res_engine in result['engines']:
            # credit the score to every engine that returned this result
            engines[res_engine].stats['score_count'] += result['score']

    return results, suggestions
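
# Hedged usage sketch (the engine name is illustrative and must be
# configured in settings.yml; `request` is the incoming Flask request):
#
#   results, suggestions = search(
#       'free software', request,
#       [{'name': 'duckduckgo', 'category': 'general'}])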


def get_engines_stats():
    # TODO refactor
    pageloads = []
    results = []
    scores = []
    errors = []
    scores_per_result = []

    max_pageload = max_results = max_score = max_errors = max_score_per_result = 0  # noqa
    for engine in engines.values():
        if engine.stats['search_count'] == 0:
            continue
        results_num = \
            engine.stats['result_count'] / float(engine.stats['search_count'])
        load_times = engine.stats['page_load_time'] / float(engine.stats['search_count'])  # noqa
        if results_num:
            score = engine.stats['score_count'] / float(engine.stats['search_count'])  # noqa
            score_per_result = score / results_num
        else:
            score = score_per_result = 0.0
        max_results = max(results_num, max_results)
        max_pageload = max(load_times, max_pageload)
        max_score = max(score, max_score)
        max_score_per_result = max(score_per_result, max_score_per_result)
        max_errors = max(max_errors, engine.stats['errors'])
        pageloads.append({'avg': load_times, 'name': engine.name})
        results.append({'avg': results_num, 'name': engine.name})
        scores.append({'avg': score, 'name': engine.name})
        errors.append({'avg': engine.stats['errors'], 'name': engine.name})
        scores_per_result.append({
            'avg': score_per_result,
            'name': engine.name
        })

    for engine in pageloads:
        engine['percentage'] = int(engine['avg'] / max_pageload * 100)

    for engine in results:
        engine['percentage'] = int(engine['avg'] / max_results * 100)

    for engine in scores:
        engine['percentage'] = int(engine['avg'] / max_score * 100)

    for engine in scores_per_result:
        engine['percentage'] = int(engine['avg'] / max_score_per_result * 100)

    for engine in errors:
        if max_errors:
            engine['percentage'] = int(float(engine['avg']) / max_errors * 100)
        else:
            engine['percentage'] = 0

    return [
        (
            gettext('Page loads (sec)'),
            sorted(pageloads, key=itemgetter('avg'))
        ),
        (
            gettext('Number of results'),
            sorted(results, key=itemgetter('avg'), reverse=True)
        ),
        (
            gettext('Scores'),
            sorted(scores, key=itemgetter('avg'), reverse=True)
        ),
        (
            gettext('Scores per result'),
            sorted(scores_per_result, key=itemgetter('avg'), reverse=True)
        ),
        (
            gettext('Errors'),
            sorted(errors, key=itemgetter('avg'), reverse=True)
        ),
    ]