__init__.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see <http://www.gnu.org/licenses/>.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''

from os.path import realpath, dirname, splitext, join
from os import listdir
from imp import load_source
import grequests
from itertools import izip_longest, chain
from operator import itemgetter
from urlparse import urlparse
from searx import settings
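
# Engines are discovered at import time: every non-underscore *.py file in
# this directory that is not blacklisted in the settings is loaded as an
# engine module and registered under its file name. Engines that declare no
# categories fall into the 'general' category.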
engine_dir = dirname(realpath(__file__))

engines = {}
categories = {'general': []}

for filename in listdir(engine_dir):
    if filename.startswith('_') or not filename.endswith('.py'):
        continue
    modname = splitext(filename)[0]
    if modname in settings.blacklist:
        continue
    filepath = join(engine_dir, filename)
    engine = load_source(modname, filepath)
    engine.name = modname
    # an engine must provide both a request() and a response() callable
    if not hasattr(engine, 'request') or not hasattr(engine, 'response'):
        continue
    engines[modname] = engine
    if not hasattr(engine, 'categories'):
        categories['general'].append(engine)
    else:
        for category_name in engine.categories:
            categories.setdefault(category_name, []).append(engine)
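
# For illustration only -- a minimal engine module just needs request() and
# response(); the URL and the parsing below are placeholders, not a bundled
# engine:
#
#     def request(query, params):
#         params['url'] = 'https://example.com/search?q=' + query
#         return params
#
#     def response(resp):
#         return [{'url': 'https://example.com/', 'content': ''}]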

def default_request_params():
    return {'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
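
# make_callback wraps an engine's response() parser as a grequests response
# hook: each parsed result is tagged with the engine name and collected into
# the shared `results` dict under that name.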

def make_callback(engine_name, results, callback):
    def process_callback(response, **kwargs):
        cb_res = []
        for result in callback(response):
            result['engine'] = engine_name
            cb_res.append(result)
        results[engine_name] = cb_res
    return process_callback

def search(query, request, selected_engines):
    global engines
    requests = []
    results = {}
    user_agent = request.headers.get('User-Agent', '')
    for ename, engine in engines.items():
        if ename not in selected_engines:
            continue
        request_params = default_request_params()
        request_params['headers']['User-Agent'] = user_agent
        request_params = engine.request(query, request_params)
        callback = make_callback(ename, results, engine.response)
        if request_params['method'] == 'GET':
            req = grequests.get(request_params['url'],
                                headers=request_params['headers'],
                                hooks=dict(response=callback),
                                cookies=request_params['cookies'])
        else:
            req = grequests.post(request_params['url'],
                                 data=request_params['data'],
                                 headers=request_params['headers'],
                                 hooks=dict(response=callback),
                                 cookies=request_params['cookies'])
        requests.append(req)
    grequests.map(requests)
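
    # izip_longest interleaves the per-engine result lists round-robin, so
    # every engine's top hit precedes anyone's second hit; filter(None, ...)
    # drops the padding added for shorter lists.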
    flat_res = filter(None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    results = []
    # deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        score = (flat_len - i) * settings.weights.get(res['engine'], 1)
        duplicated = False
        for new_res in results:
            if res['parsed_url'].netloc == new_res['parsed_url'].netloc and \
               res['parsed_url'].path == new_res['parsed_url'].path and \
               res['parsed_url'].query == new_res['parsed_url'].query:
                duplicated = new_res
                break
        if duplicated:
            # merge: keep the longer snippet, add up the scores, record both engines
            if len(res.get('content', '')) > len(duplicated.get('content', '')):
                duplicated['content'] = res['content']
            duplicated['score'] += score
            duplicated['engine'] += ', ' + res['engine']
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                # prefer the https variant of the url; ParseResult is an
                # immutable namedtuple, so build a replacement
                duplicated['parsed_url'] = duplicated['parsed_url']._replace(scheme='https')
                duplicated['url'] = duplicated['parsed_url'].geturl()
        else:
            res['score'] = score
            results.append(res)
    return sorted(results, key=itemgetter('score'), reverse=True)
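
# Example usage (illustrative: `request` stands for a Flask/Werkzeug-style
# request object exposing a `headers` mapping, and the engine names assume
# matching modules exist in this directory):
#
#     results = search('free software', request, ['duckduckgo', 'wikipedia'])
#     for res in results:
#         print res['url'], res['score']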