__init__.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
from os.path import realpath, dirname, splitext, join
from imp import load_source
import grequests
from itertools import izip_longest, chain
from operator import itemgetter
from urlparse import urlparse
from searx import settings
from searx.utils import gen_useragent
import sys
from datetime import datetime

engine_dir = dirname(realpath(__file__))
number_of_searches = 0
engines = {}
categories = {'general': []}


def load_module(filename):
    modname = splitext(filename)[0]
    if modname in sys.modules:
        del sys.modules[modname]
    filepath = join(engine_dir, filename)
    module = load_source(modname, filepath)
    module.name = modname
    return module
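
# Illustrative example (the engine name is hypothetical): load_module()
# dynamically imports an engine module from this directory, evicting any
# stale sys.modules entry first so the file is re-read from disk:
#
#     engine = load_module('duckduckgo.py')
#     engine.name   # -> 'duckduckgo'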

if 'engines' not in settings or not settings['engines']:
    print '[E] Error: no engines found. Edit your settings.yml'
    exit(2)

for engine_data in settings['engines']:
    engine_name = engine_data['engine']
    engine = load_module(engine_name + '.py')
    for param_name in engine_data:
        if param_name == 'engine':
            continue
        if param_name == 'categories':
            if engine_data['categories'] == 'none':
                engine.categories = []
            else:
                engine.categories = map(str.strip, engine_data['categories'].split(','))
            continue
        setattr(engine, param_name, engine_data[param_name])
    for engine_attr in dir(engine):
        if engine_attr.startswith('_'):
            continue
        if getattr(engine, engine_attr) is None:
            print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr)
            sys.exit(1)
    engines[engine.name] = engine
    engine.stats = {'result_count': 0,
                    'search_count': 0,
                    'page_load_time': 0,
                    'score_count': 0,
                    'errors': 0}
    if hasattr(engine, 'categories'):
        for category_name in engine.categories:
            categories.setdefault(category_name, []).append(engine)
    else:
        categories['general'].append(engine)
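
# A sketch of the settings.yml structure consumed above (values hypothetical).
# Each entry names the engine module; every other key is copied onto the
# module with setattr(), and 'categories' is split on commas:
#
#     engines:
#       - engine : duckduckgo        # loads duckduckgo.py from engine_dir
#         categories : general,web   # -> engine.categories == ['general', 'web']
#         weight : 2                 # any extra key becomes a module attribute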


def default_request_params():
    return {'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
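
# Sketch of the engine-module contract assumed by the code below (names and
# URL illustrative): each engine provides request() to fill in these default
# params and response() to parse the reply into result dicts:
#
#     def request(query, params):
#         params['url'] = 'https://example.com/search?q=' + query
#         return params
#
#     def response(resp):
#         return [{'url': ..., 'title': ..., 'content': ...}]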


def make_callback(engine_name, results, suggestions, callback, params):
    # creating a callback wrapper for the search engine results
    def process_callback(response, **kwargs):
        cb_res = []
        response.search_params = params
        engines[engine_name].stats['page_load_time'] += \
            (datetime.now() - params['started']).total_seconds()
        try:
            search_results = callback(response)
        except Exception, e:
            engines[engine_name].stats['errors'] += 1
            results[engine_name] = cb_res
            print '[E] Error with engine "{0}":\n\t{1}'.format(engine_name, str(e))
            return
        for result in search_results:
            result['engine'] = engine_name
            if 'suggestion' in result:
                # TODO type checks
                suggestions.add(result['suggestion'])
                continue
            cb_res.append(result)
        results[engine_name] = cb_res
    return process_callback
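
# process_callback is a closure: every concurrent request gets its own
# callback bound to one engine, but all of them write into the shared
# `results` dict and `suggestions` set that search() reads back after
# grequests.map() has finished.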


def score_results(results):
    flat_res = filter(None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    engines_len = len(results)
    results = []
    # deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        res['engines'] = [res['engine']]
        weight = 1.0
        if hasattr(engines[res['engine']], 'weight'):
            weight = float(engines[res['engine']].weight)
        score = int((flat_len - i) / engines_len) * weight + 1
        duplicated = False
        for new_res in results:
            # compare urls with any trailing slash stripped from the path
            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path
            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path
            if res['parsed_url'].netloc == new_res['parsed_url'].netloc and\
               p1 == p2 and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break
        if duplicated:
            # merge: keep the longer content, add scores, prefer https urls
            if len(res.get('content', '')) > len(duplicated.get('content', '')):
                duplicated['content'] = res['content']
            duplicated['score'] += score
            duplicated['engines'].append(res['engine'])
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['url'] = res['parsed_url'].geturl()
                duplicated['parsed_url'] = res['parsed_url']
        else:
            res['score'] = score
            results.append(res)
    return sorted(results, key=itemgetter('score'), reverse=True)
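
# Worked example of the scoring above (numbers hypothetical): with 3 engines
# and 30 interleaved results, the first result scores
# int((30 - 0) / 3) * 1.0 + 1 = 11 and the last int((30 - 29) / 3) * 1.0 + 1 = 1,
# so earlier ranks and heavier engine weights win. Duplicates (same netloc,
# slash-stripped path, query and template) are merged instead of re-added:
# their scores accumulate and an https URL replaces an http one.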


def search(query, request, selected_engines):
    global engines, categories, number_of_searches
    requests = []
    results = {}
    suggestions = set()
    number_of_searches += 1
    #user_agent = request.headers.get('User-Agent', '')
    user_agent = gen_useragent()

    for selected_engine in selected_engines:
        if selected_engine['name'] not in engines:
            continue

        engine = engines[selected_engine['name']]

        request_params = default_request_params()
        request_params['headers']['User-Agent'] = user_agent
        request_params['category'] = selected_engine['category']
        request_params['started'] = datetime.now()
        request_params = engine.request(query, request_params)

        callback = make_callback(selected_engine['name'],
                                 results,
                                 suggestions,
                                 engine.response,
                                 request_params)

        request_args = dict(headers=request_params['headers'],
                            hooks=dict(response=callback),
                            cookies=request_params['cookies'],
                            timeout=settings['server']['request_timeout'])

        if request_params['method'] == 'GET':
            req = grequests.get
        else:
            req = grequests.post
            request_args['data'] = request_params['data']

        # ignoring empty urls
        if not request_params['url']:
            continue

        requests.append(req(request_params['url'], **request_args))
    grequests.map(requests)

    for engine_name, engine_results in results.items():
        engines[engine_name].stats['search_count'] += 1
        engines[engine_name].stats['result_count'] += len(engine_results)

    results = score_results(results)

    for result in results:
        for res_engine in result['engines']:
            # credit the score to each engine that returned this result
            engines[res_engine].stats['score_count'] += result['score']

    return results, suggestions
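
# Hypothetical usage from the web frontend (the `request` argument is the
# incoming HTTP request object; it is only kept for the commented-out
# User-Agent forwarding above):
#
#     selected = [{'name': 'duckduckgo', 'category': 'general'}]
#     results, suggestions = search('free software', request, selected)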


def get_engines_stats():
    # TODO refactor
    pageloads = []
    results = []
    scores = []
    errors = []
    scores_per_result = []

    max_pageload = max_results = max_score = max_errors = max_score_per_result = 0
    for engine in engines.values():
        if engine.stats['search_count'] == 0:
            continue
        results_num = engine.stats['result_count'] / float(engine.stats['search_count'])
        load_times = engine.stats['page_load_time'] / float(engine.stats['search_count'])
        if results_num:
            score = engine.stats['score_count'] / float(engine.stats['search_count'])
            score_per_result = score / results_num
        else:
            score = score_per_result = 0.0
        max_results = max(results_num, max_results)
        max_pageload = max(load_times, max_pageload)
        max_score = max(score, max_score)
        max_score_per_result = max(score_per_result, max_score_per_result)
        max_errors = max(max_errors, engine.stats['errors'])
        pageloads.append({'avg': load_times, 'name': engine.name})
        results.append({'avg': results_num, 'name': engine.name})
        scores.append({'avg': score, 'name': engine.name})
        errors.append({'avg': engine.stats['errors'], 'name': engine.name})
        scores_per_result.append({'avg': score_per_result, 'name': engine.name})

    for engine in pageloads:
        engine['percentage'] = int(engine['avg'] / max_pageload * 100)

    for engine in results:
        engine['percentage'] = int(engine['avg'] / max_results * 100)

    for engine in scores:
        engine['percentage'] = int(engine['avg'] / max_score * 100)

    for engine in scores_per_result:
        engine['percentage'] = int(engine['avg'] / max_score_per_result * 100)

    for engine in errors:
        if max_errors:
            engine['percentage'] = int(float(engine['avg']) / max_errors * 100)
        else:
            engine['percentage'] = 0

    return [('Page loads (sec)', sorted(pageloads, key=itemgetter('avg'))),
            ('Number of results', sorted(results, key=itemgetter('avg'), reverse=True)),
            ('Scores', sorted(scores, key=itemgetter('avg'), reverse=True)),
            ('Scores per result', sorted(scores_per_result, key=itemgetter('avg'), reverse=True)),
            ('Errors', sorted(errors, key=itemgetter('avg'), reverse=True))]