'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see <http://www.gnu.org/licenses/>.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
from os.path import realpath, dirname, splitext, join
from imp import load_source
import grequests
from itertools import izip_longest, chain
from operator import itemgetter
from urlparse import urlparse
from searx import settings
import ConfigParser
import sys
from datetime import datetime

engine_dir = dirname(realpath(__file__))
searx_dir = join(engine_dir, '../../')

engines_config = ConfigParser.SafeConfigParser()
engines_config.read(join(searx_dir, 'engines.cfg'))
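
# Illustrative engines.cfg layout, inferred from the parsing code below
# (section and option values are examples, not a shipped config):
#
#   [duckduckgo]
#   engine = duckduckgo
#   categories = general
#
# Each section defines one engine: the `engine` option names the module file
# (without .py) in this directory, and every other option is copied onto the
# loaded module as an attribute.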

number_of_searches = 0

engines = {}
categories = {'general': []}


def load_module(filename):
    # load (or reload) an engine module from this directory by file name
    modname = splitext(filename)[0]
    if modname in sys.modules:
        del sys.modules[modname]
    filepath = join(engine_dir, filename)
    module = load_source(modname, filepath)
    module.name = modname
    return module
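
# Example (hypothetical engine file name):
#   module = load_module('duckduckgo.py')
#   module.name  # -> 'duckduckgo'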

if not engines_config.sections():
    print '[E] Error: no engines found. Edit your engines.cfg'
    sys.exit(2)

for section in engines_config.sections():
    engine_data = engines_config.options(section)
    engine = load_module(engines_config.get(section, 'engine') + '.py')
    engine.name = section
    for param_name in engine_data:
        if param_name == 'engine':
            continue
        if param_name == 'categories':
            if engines_config.get(section, param_name) == 'none':
                engine.categories = []
            else:
                engine.categories = map(
                    str.strip, engines_config.get(section, param_name).split(','))
            continue
        setattr(engine, param_name, engines_config.get(section, param_name))
    for engine_attr in dir(engine):
        if engine_attr.startswith('_'):
            continue
        if getattr(engine, engine_attr) is None:
            print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(
                engine.name, engine_attr)
            sys.exit(1)
    engines[engine.name] = engine
    engine.stats = {'result_count': 0,
                    'search_count': 0,
                    'page_load_time': 0,
                    'score_count': 0,
                    'errors': 0}
    if hasattr(engine, 'categories'):
        for category_name in engine.categories:
            categories.setdefault(category_name, []).append(engine)
    else:
        categories['general'].append(engine)


def default_request_params():
    return {'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
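
# Each engine module is expected to provide two callables; this contract is
# inferred from the calls in `search` below, not from a formal interface:
#   request(query, params)  -- fill in params['url'], params['data'], ... and
#                              return the params dict
#   response(resp)          -- parse the HTTP response and return a list of
#                              result dicts such as
#                              {'url': ..., 'title': ..., 'content': ...}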


def make_callback(engine_name, results, callback, params):
    # creating a callback wrapper for the search engine results
    def process_callback(response, **kwargs):
        cb_res = []
        response.search_params = params
        engines[engine_name].stats['page_load_time'] += \
            (datetime.now() - params['started']).total_seconds()
        try:
            search_results = callback(response)
        except Exception as e:
            engines[engine_name].stats['errors'] += 1
            results[engine_name] = cb_res
            print '[E] Error with engine "{0}":\n\t{1}'.format(engine_name, str(e))
            return
        for result in search_results:
            result['engine'] = engine_name
            cb_res.append(result)
        results[engine_name] = cb_res
    return process_callback
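
# The returned wrapper is installed as a grequests response hook by `search`
# below, roughly:
#   hooks=dict(response=make_callback(name, results, engine.response, params))
# so each engine's parser runs as soon as its async request completes.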


def highlight_content(content, query):
    # ignoring html contents
    # TODO better html content detection
    if content.find('<') != -1:
        return content
    for chunk in query.split():
        content = content.replace(chunk, '<b>{0}</b>'.format(chunk))
    return content
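
# For example (plain-text content only):
#   highlight_content('free software search', 'free search')
#   # -> '<b>free</b> software <b>search</b>'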


def search(query, request, selected_engines):
    global engines, categories, number_of_searches
    requests = []
    results = {}
    number_of_searches += 1
    user_agent = request.headers.get('User-Agent', '')
    for selected_engine in selected_engines:
        if selected_engine['name'] not in engines:
            continue
        engine = engines[selected_engine['name']]
        request_params = default_request_params()
        request_params['headers']['User-Agent'] = user_agent
        request_params['category'] = selected_engine['category']
        request_params['started'] = datetime.now()
        request_params = engine.request(query, request_params)
        callback = make_callback(selected_engine['name'],
                                 results,
                                 engine.response,
                                 request_params)
        request_args = dict(headers=request_params['headers'],
                            hooks=dict(response=callback),
                            cookies=request_params['cookies'],
                            timeout=settings.request_timeout)
        if request_params['method'] == 'GET':
            req = grequests.get
        else:
            req = grequests.post
            request_args['data'] = request_params['data']
        # ignoring empty urls
        if not request_params['url']:
            continue
        requests.append(req(request_params['url'], **request_args))
    # send all requests concurrently; the response hooks fill `results`
    grequests.map(requests)
    for engine_name, engine_results in results.items():
        engines[engine_name].stats['search_count'] += 1
        engines[engine_name].stats['result_count'] += len(engine_results)

    # interleave the per-engine result lists, then drop the padding Nones
    flat_res = filter(None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    engines_len = len(selected_engines)
    results = []
    # deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        res['engines'] = [res['engine']]
        score = int((flat_len - i) / engines_len) * settings.weights.get(res['engine'], 1) + 1
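        # Worked example (illustrative numbers): with flat_len == 10 results
        # from engines_len == 2 selected engines and weight 1, the first
        # result gets int((10 - 0) / 2) * 1 + 1 == 6 and the last one
        # int((10 - 9) / 2) * 1 + 1 == 1, so earlier results score higher.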
        duplicated = False
        for new_res in results:
            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path
            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path
            if res['parsed_url'].netloc == new_res['parsed_url'].netloc and\
               p1 == p2 and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break
        if duplicated:
            # merge: keep the longer content, add up scores, prefer https urls
            if len(res.get('content', '')) > len(duplicated.get('content', '')):
                duplicated['content'] = res['content']
            duplicated['score'] += score
            duplicated['engines'].append(res['engine'])
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['url'] = res['parsed_url'].geturl()
                duplicated['parsed_url'] = res['parsed_url']
        else:
            res['score'] = score
            results.append(res)
    for result in results:
        if 'content' in result:
            result['content'] = highlight_content(result['content'], query)
        # credit the result's score to every engine that returned it
        for res_engine in result['engines']:
            engines[res_engine].stats['score_count'] += result['score']
    return sorted(results, key=itemgetter('score'), reverse=True)
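
# Minimal usage sketch (hypothetical engine name; `request` only needs a
# Flask-style `headers` mapping with a User-Agent entry):
#   results = search('free software',
#                    request,
#                    [{'name': 'duckduckgo', 'category': 'general'}])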


def get_engines_stats():
    pageloads = []
    results = []
    scores = []
    errors = []
    max_pageload = max_results = max_score = max_errors = 0
    for engine in engines.values():
        if engine.stats['search_count'] == 0:
            continue
        results_num = engine.stats['result_count'] / float(engine.stats['search_count'])
        load_times = engine.stats['page_load_time'] / float(engine.stats['search_count'])
        if results_num:
            score = engine.stats['score_count'] / float(engine.stats['search_count'])
        else:
            score = 0
        max_results = max(results_num, max_results)
        max_pageload = max(load_times, max_pageload)
        max_score = max(score, max_score)
        max_errors = max(max_errors, engine.stats['errors'])
        pageloads.append({'avg': load_times, 'name': engine.name})
        results.append({'avg': results_num, 'name': engine.name})
        scores.append({'avg': score, 'name': engine.name})
        errors.append({'avg': engine.stats['errors'], 'name': engine.name})
    for engine in pageloads:
        engine['percentage'] = int(engine['avg'] / max_pageload * 100)
    for engine in results:
        engine['percentage'] = int(engine['avg'] / max_results * 100)
    for engine in scores:
        engine['percentage'] = int(engine['avg'] / max_score * 100)
    for engine in errors:
        if max_errors:
            engine['percentage'] = int(engine['avg'] / max_errors * 100)
        else:
            engine['percentage'] = 0
    return [('Page loads (sec)', sorted(pageloads, key=itemgetter('avg'))),
            ('Number of results', sorted(results, key=itemgetter('avg'), reverse=True)),
            ('Scores', sorted(scores, key=itemgetter('avg'), reverse=True)),
            ('Errors', sorted(errors, key=itemgetter('avg'), reverse=True))]
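
# The return value presumably feeds a stats view; its shape follows from the
# code above (numbers and the engine name are illustrative):
#   [('Page loads (sec)',
#     [{'avg': 0.42, 'name': 'duckduckgo', 'percentage': 100}, ...]),
#    ('Number of results', [...]),
#    ('Scores', [...]),
#    ('Errors', [...])]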