__init__.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
from os.path import realpath, dirname, splitext, join
from imp import load_source
from itertools import izip_longest, chain
from operator import itemgetter
from urlparse import urlparse
from datetime import datetime
import ConfigParser
import sys
import grequests

from searx import settings

engine_dir = dirname(realpath(__file__))
searx_dir = join(engine_dir, '../../')

engines_config = ConfigParser.SafeConfigParser()
engines_config.read(join(searx_dir, 'engines.cfg'))

engines = {}
categories = {'general': []}
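
# Load an engine module from this directory by file name, dropping any
# previously imported copy so a fresh configuration takes effect.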
def load_module(filename):
    modname = splitext(filename)[0]
    if modname in sys.modules:
        del sys.modules[modname]
    filepath = join(engine_dir, filename)
    module = load_source(modname, filepath)
    module.name = modname
    return module


if not engines_config.sections():
    print '[E] Error: no engines found. Edit your engines.cfg'
    sys.exit(2)
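
# Instantiate every engine listed in engines.cfg: copy its config options onto
# the engine module, verify that no declared attribute is left unset, and
# register the engine under its categories (defaulting to 'general').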
for section in engines_config.sections():
    engine_data = engines_config.options(section)
    engine = load_module(engines_config.get(section, 'engine')+'.py')
    engine.name = section
    for param_name in engine_data:
        if param_name == 'engine':
            continue
        if param_name == 'categories':
            engine.categories = map(str.strip, engines_config.get(section, param_name).split(','))
            continue
        setattr(engine, param_name, engines_config.get(section, param_name))
    for engine_attr in dir(engine):
        if engine_attr.startswith('_'):
            continue
        if getattr(engine, engine_attr) is None:
            print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr)
            sys.exit(1)
    engines[engine.name] = engine
    engine.stats = {'result_count': 0, 'search_count': 0, 'page_load_time': 0}
    if hasattr(engine, 'categories'):
        for category_name in engine.categories:
            categories.setdefault(category_name, []).append(engine)
    else:
        categories['general'].append(engine)


def default_request_params():
    return {'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
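

# Wrap an engine's response parser in a grequests response hook that tags each
# result with the engine name, stores the parsed results per engine, and
# accumulates the page load time in the engine's statistics.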
def make_callback(engine_name, results, callback, params):
    def process_callback(response, **kwargs):
        cb_res = []
        response.search_params = params
        engines[engine_name].stats['page_load_time'] += (datetime.now() - params['started']).total_seconds()
        for result in callback(response):
            result['engine'] = engine_name
            cb_res.append(result)
        results[engine_name] = cb_res
    return process_callback
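

# Fan the query out to every engine of the selected categories, wait for all
# responses, then merge, deduplicate and score the results.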
def search(query, request, selected_categories):
    global engines, categories
    requests = []
    results = {}
    selected_engines = []
    user_agent = request.headers.get('User-Agent', '')
    if not selected_categories:
        selected_categories = ['general']
    for categ in selected_categories:
        selected_engines.extend({'category': categ, 'name': x.name} for x in categories[categ])
    for selected_engine in selected_engines:
        if selected_engine['name'] not in engines:
            continue
        engine = engines[selected_engine['name']]
        request_params = default_request_params()
        request_params['headers']['User-Agent'] = user_agent
        request_params['category'] = selected_engine['category']
        request_params['started'] = datetime.now()
        request_params = engine.request(query, request_params)
        callback = make_callback(selected_engine['name'], results, engine.response, request_params)
        if request_params['method'] == 'GET':
            req = grequests.get(request_params['url'],
                                headers=request_params['headers'],
                                hooks=dict(response=callback),
                                cookies=request_params['cookies'])
        else:
            req = grequests.post(request_params['url'],
                                 data=request_params['data'],
                                 headers=request_params['headers'],
                                 hooks=dict(response=callback),
                                 cookies=request_params['cookies'])
        requests.append(req)
    grequests.map(requests)
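
    # update per-engine statistics now that every response has been processed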
    for engine_name, engine_results in results.items():
        engines[engine_name].stats['search_count'] += 1
        engines[engine_name].stats['result_count'] += len(engine_results)

    # interleave the per-engine result lists; izip_longest pads the shorter
    # lists with None, which filter() drops again
    flat_res = filter(None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    results = []

    # deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        score = (flat_len - i) * settings.weights.get(res['engine'], 1)
        duplicated = False
        for new_res in results:
            if res['parsed_url'].netloc == new_res['parsed_url'].netloc and\
               res['parsed_url'].path == new_res['parsed_url'].path and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break
        if duplicated:
            # keep the longer description, merge the scores and engine names
            if len(res.get('content', '')) > len(duplicated.get('content', '')):
                duplicated['content'] = res['content']
            duplicated['score'] += score
            duplicated['engine'] += ', ' + res['engine']
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                # prefer the https variant of the duplicated url
                # (ParseResult is immutable, so rebuild it via _replace)
                duplicated['parsed_url'] = duplicated['parsed_url']._replace(scheme='https')
                duplicated['url'] = duplicated['parsed_url'].geturl()
        else:
            res['score'] = score
            results.append(res)

    return sorted(results, key=itemgetter('score'), reverse=True)
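

# Aggregate per-engine statistics for the stats page: average page load time
# and average result count per search, plus each value as a percentage of the
# maximum across engines.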
def get_engines_stats():
    pageloads = []
    results = []
    max_pageload = max_results = 0
    for engine in engines.values():
        if engine.stats['search_count'] == 0:
            continue
        results_num = engine.stats['result_count'] / float(engine.stats['search_count'])
        load_times = engine.stats['page_load_time'] / float(engine.stats['search_count'])
        max_results = max(results_num, max_results)
        max_pageload = max(load_times, max_pageload)
        pageloads.append({'avg': load_times, 'name': engine.name})
        results.append({'avg': results_num, 'name': engine.name})

    for engine in pageloads:
        engine['percentage'] = int(engine['avg'] / max_pageload * 100)

    for engine in results:
        engine['percentage'] = int(engine['avg'] / max_results * 100)

    return [('Page loads', sorted(pageloads, key=itemgetter('avg'), reverse=True)),
            ('Number of results', sorted(results, key=itemgetter('avg'), reverse=True))]