'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
 
import gc
import sys
import threading
from time import time
from uuid import uuid4

from flask_babel import gettext
import requests.exceptions

import searx.poolrequests as requests_lib
from searx.engines import (
    categories, engines, settings
)
from searx.answerers import ask
from searx.utils import gen_useragent
from searx.query import RawTextQuery, SearchQuery, VALID_LANGUAGE_CODE
from searx.results import ResultContainer
from searx import logger
from searx.plugins import plugins
from searx.exceptions import SearxParameterException
 
try:
    from thread import start_new_thread
except ImportError:
    from _thread import start_new_thread

if sys.version_info[0] == 3:
    unicode = str
 
logger = logger.getChild('search')

number_of_searches = 0

# one module-level lock shared by all stat updates; a fresh
# threading.RLock() created at each `with` site would lock nothing
stats_lock = threading.RLock()
 
max_request_timeout = settings.get('outgoing', {}).get('max_request_timeout')
if max_request_timeout is None:
    logger.info('max_request_timeout={0}'.format(max_request_timeout))
else:
    if isinstance(max_request_timeout, float):
        logger.info('max_request_timeout={0} second(s)'.format(max_request_timeout))
    else:
        logger.critical('outgoing.max_request_timeout if defined has to be float')
        sys.exit(1)
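
# Example (illustrative): enabling a global cap in settings.yml would look
# like this -- note that the value has to be a float, not an int:
#
#   outgoing:
#       max_request_timeout: 10.0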
 
def send_http_request(engine, request_params):
    # create a dictionary containing all information about the request
    request_args = dict(
        headers=request_params['headers'],
        cookies=request_params['cookies'],
        verify=request_params['verify']
    )

    # specific type of request (GET or POST)
    if request_params['method'] == 'GET':
        req = requests_lib.get
    else:
        req = requests_lib.post
        request_args['data'] = request_params['data']

    # send the request
    return req(request_params['url'], **request_args)
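
# Example (illustrative): a typical request_params dict produced by an
# engine's request() hook for a GET request -- the URL is hypothetical:
#
#   {
#       'method': 'GET',
#       'headers': {'User-Agent': '...'},
#       'data': {},
#       'url': 'https://example.org/search?q=searx',
#       'cookies': {},
#       'verify': True,
#   }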
 
def search_one_http_request(engine, query, request_params):
    # update request parameters dependent on
    # search-engine (contained in engines folder)
    engine.request(query, request_params)

    # ignore missing or empty urls (the engine rejected the request)
    if not request_params['url']:
        return None

    # send request
    response = send_http_request(engine, request_params)

    # parse the response
    response.search_params = request_params
    return engine.response(response)
 
def search_one_offline_request(engine, query, request_params):
    return engine.search(query, request_params)
 
def search_one_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit):
    if engines[engine_name].offline:
        return search_one_offline_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit)  # noqa
    return search_one_http_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit)
 
def search_one_offline_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit):
    engine = engines[engine_name]

    try:
        search_results = search_one_offline_request(engine, query, request_params)

        if search_results:
            result_container.extend(engine_name, search_results)

            engine_time = time() - start_time
            result_container.add_timing(engine_name, engine_time, engine_time)
            with stats_lock:
                engine.stats['engine_time'] += engine_time
                engine.stats['engine_time_count'] += 1

    except ValueError as e:
        record_offline_engine_stats_on_error(engine, result_container, start_time)
        logger.exception('engine {0} : invalid input : {1}'.format(engine_name, e))
    except Exception as e:
        record_offline_engine_stats_on_error(engine, result_container, start_time)
        result_container.add_unresponsive_engine((
            engine_name,
            u'{0}: {1}'.format(gettext('unexpected crash'), e),
        ))
        logger.exception('engine {0} : exception : {1}'.format(engine_name, e))
 
def record_offline_engine_stats_on_error(engine, result_container, start_time):
    engine_time = time() - start_time
    result_container.add_timing(engine.name, engine_time, engine_time)

    with stats_lock:
        engine.stats['errors'] += 1
 
def search_one_http_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit):
    # set timeout for all HTTP requests
    requests_lib.set_timeout_for_thread(timeout_limit, start_time=start_time)
    # reset the HTTP total time
    requests_lib.reset_time_for_thread()

    engine = engines[engine_name]

    # suppose everything will be alright
    requests_exception = False

    try:
        # send requests and parse the results
        search_results = search_one_http_request(engine, query, request_params)

        # check if the engine accepted the request
        if search_results is not None:
            # yes, so add results
            result_container.extend(engine_name, search_results)

            # update engine time when there is no exception
            engine_time = time() - start_time
            page_load_time = requests_lib.get_time_for_thread()
            result_container.add_timing(engine_name, engine_time, page_load_time)
            with stats_lock:
                engine.stats['engine_time'] += engine_time
                engine.stats['engine_time_count'] += 1
                # update stats with the total HTTP time
                engine.stats['page_load_time'] += page_load_time
                engine.stats['page_load_count'] += 1
    except Exception as e:
        # timing
        engine_time = time() - start_time
        page_load_time = requests_lib.get_time_for_thread()
        result_container.add_timing(engine_name, engine_time, page_load_time)

        # record the error
        with stats_lock:
            engine.stats['errors'] += 1

        if isinstance(e, requests.exceptions.Timeout):
            result_container.add_unresponsive_engine((engine_name, gettext('timeout')))
            # requests timeout (connect or read)
            logger.error("engine {0} : HTTP requests timeout "
                         "(search duration : {1} s, timeout: {2} s) : {3}"
                         .format(engine_name, engine_time, timeout_limit, e.__class__.__name__))
            requests_exception = True
        elif isinstance(e, requests.exceptions.RequestException):
            result_container.add_unresponsive_engine((engine_name, gettext('request exception')))
            # other requests exception
            logger.exception("engine {0} : requests exception "
                             "(search duration : {1} s, timeout: {2} s) : {3}"
                             .format(engine_name, engine_time, timeout_limit, e))
            requests_exception = True
        else:
            result_container.add_unresponsive_engine((
                engine_name,
                u'{0}: {1}'.format(gettext('unexpected crash'), e),
            ))
            # other errors
            logger.exception('engine {0} : exception : {1}'.format(engine_name, e))

    # suspend the engine if there were HTTP errors
    with stats_lock:
        if requests_exception:
            # update continuous_errors / suspend_end_time
            engine.continuous_errors += 1
            engine.suspend_end_time = time() + min(settings['search']['max_ban_time_on_fail'],
                                                   engine.continuous_errors * settings['search']['ban_time_on_fail'])
        else:
            # no HTTP error (perhaps an engine error)
            # anyway, reset the suspend variables
            engine.continuous_errors = 0
            engine.suspend_end_time = 0
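
# Example (illustrative): with the default settings.yml values
# ban_time_on_fail: 5 and max_ban_time_on_fail: 120, a third consecutive
# HTTP failure suspends the engine for min(120, 3 * 5) = 15 seconds; the
# ban grows linearly with continuous_errors until it saturates at 120 s.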
 
def search_multiple_requests(requests, result_container, start_time, timeout_limit):
    search_id = str(uuid4())

    for engine_name, query, request_params in requests:
        th = threading.Thread(
            target=search_one_request_safe,
            args=(engine_name, query, request_params, result_container, start_time, timeout_limit),
            name=search_id,
        )
        th._engine_name = engine_name
        th.start()

    for th in threading.enumerate():
        if th.name == search_id:
            remaining_time = max(0.0, timeout_limit - (time() - start_time))
            th.join(remaining_time)
            if th.is_alive():
                result_container.add_unresponsive_engine((th._engine_name, gettext('timeout')))
                logger.warning('engine timeout: {0}'.format(th._engine_name))
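
# Example (illustrative): with timeout_limit=3.0 and two engine threads,
# if joining the first thread takes 2.5 s, the second join() only waits
# the remaining 0.5 s -- the loop never blocks longer than timeout_limit
# in total, no matter how many engines were started.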
 
# get default request parameters
def default_request_params():
    return {
        'method': 'GET',
        'headers': {},
        'data': {},
        'url': '',
        'cookies': {},
        'verify': True
    }
 
# remove duplicate queries.
# FIXME: does not fix "!music !soundcloud", because the categories are 'none' and 'music'
def deduplicate_query_engines(query_engines):
    uniq_query_engines = {q["category"] + '|' + q["name"]: q for q in query_engines}
    return uniq_query_engines.values()
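
# Example (illustrative): duplicate selections collapse on "category|name":
#
#   deduplicate_query_engines([
#       {'category': 'general', 'name': 'wikipedia'},
#       {'category': 'general', 'name': 'wikipedia'},
#       {'category': 'it', 'name': 'github'},
#   ])
#   # -> values [{'category': 'general', 'name': 'wikipedia'},
#   #            {'category': 'it', 'name': 'github'}]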
 
def get_search_query_from_webapp(preferences, form):
    # no text for the query ?
    if not form.get('q'):
        raise SearxParameterException('q', '')

    # set blocked engines
    disabled_engines = preferences.engines.get_disabled()

    # parse query, if tags are set, which change
    # the search engine or search language
    raw_text_query = RawTextQuery(form['q'], disabled_engines)
    raw_text_query.parse_query()

    # set query
    query = raw_text_query.getSearchQuery()

    # get and check page number
    pageno_param = form.get('pageno', '1')
    if not pageno_param.isdigit() or int(pageno_param) < 1:
        raise SearxParameterException('pageno', pageno_param)
    query_pageno = int(pageno_param)

    # get language
    # set specific language if set on request, query or preferences
    # TODO support search with multiple languages
    if raw_text_query.languages:
        query_lang = raw_text_query.languages[-1]
    elif 'language' in form:
        query_lang = form.get('language')
    else:
        query_lang = preferences.get_value('language')

    # check language
    if not VALID_LANGUAGE_CODE.match(query_lang):
        raise SearxParameterException('language', query_lang)

    # get safesearch
    if 'safesearch' in form:
        query_safesearch = form.get('safesearch')
        # first check safesearch
        if not query_safesearch.isdigit():
            raise SearxParameterException('safesearch', query_safesearch)
        query_safesearch = int(query_safesearch)
    else:
        query_safesearch = preferences.get_value('safesearch')

    # safesearch : second check
    if query_safesearch < 0 or query_safesearch > 2:
        raise SearxParameterException('safesearch', query_safesearch)

    # get time_range
    query_time_range = form.get('time_range')
    # check time_range
    if query_time_range not in ('None', None, '', 'day', 'week', 'month', 'year'):
        raise SearxParameterException('time_range', query_time_range)

    # query_engines
    query_engines = raw_text_query.engines

    # timeout_limit
    query_timeout = raw_text_query.timeout_limit
    if query_timeout is None and 'timeout_limit' in form:
        raw_time_limit = form.get('timeout_limit')
        if raw_time_limit in ['None', '']:
            raw_time_limit = None
        else:
            try:
                query_timeout = float(raw_time_limit)
            except ValueError:
                raise SearxParameterException('timeout_limit', raw_time_limit)

    # query_categories
    query_categories = []

    # if engines are calculated from the query,
    # set categories by using that information
    if query_engines and raw_text_query.specific:
        additional_categories = set()
        for engine in query_engines:
            if 'from_bang' in engine and engine['from_bang']:
                additional_categories.add('none')
            else:
                additional_categories.add(engine['category'])
        query_categories = list(additional_categories)

    # otherwise, use the defined categories to
    # calculate which engines should be used
    else:
        # set categories/engines
        load_default_categories = True
        for pd_name, pd in form.items():
            if pd_name == 'categories':
                query_categories.extend(categ for categ in map(unicode.strip, pd.split(',')) if categ in categories)
            elif pd_name == 'engines':
                pd_engines = [{'category': engines[engine].categories[0],
                               'name': engine}
                              for engine in map(unicode.strip, pd.split(',')) if engine in engines]
                if pd_engines:
                    query_engines.extend(pd_engines)
                    load_default_categories = False
            elif pd_name.startswith('category_'):
                category = pd_name[9:]

                # if category is not found in list, skip
                if category not in categories:
                    continue

                if pd != 'off':
                    # add category to list
                    query_categories.append(category)
                elif category in query_categories:
                    # remove category from list if property is set to 'off'
                    query_categories.remove(category)

        if not load_default_categories:
            if not query_categories:
                query_categories = list(set(engine['category']
                                            for engine in query_engines))
        else:
            # if no category is specified for this search,
            # use the user-defined default configuration
            # (stored in a cookie)
            if not query_categories:
                cookie_categories = preferences.get_value('categories')
                for ccateg in cookie_categories:
                    if ccateg in categories:
                        query_categories.append(ccateg)

            # if still no category is specified, use 'general'
            # as the default category
            if not query_categories:
                query_categories = ['general']

            # use all engines for that search which are
            # declared under the specified categories
            for categ in query_categories:
                query_engines.extend({'category': categ,
                                      'name': engine.name}
                                     for engine in categories[categ]
                                     if (engine.name, categ) not in disabled_engines)

    query_engines = deduplicate_query_engines(query_engines)

    return (SearchQuery(query, query_engines, query_categories,
                        query_lang, query_safesearch, query_pageno,
                        query_time_range, query_timeout, preferences),
            raw_text_query)
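
# Example (illustrative): a minimal webapp form such as
#
#   {'q': '!wikipedia searx', 'pageno': '1', 'language': 'en-US'}
#
# yields a SearchQuery restricted to the wikipedia engine (the "!" bang
# marks the query as specific) together with the parsed RawTextQuery.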
 
class Search(object):
    """Search information container"""

    def __init__(self, search_query):
        # init vars
        super(Search, self).__init__()
        self.search_query = search_query
        self.result_container = ResultContainer()
        self.actual_timeout = None
 
    # do the search request
    def search(self):
        global number_of_searches

        # start time
        start_time = time()

        # answers ?
        answerers_results = ask(self.search_query)

        if answerers_results:
            for results in answerers_results:
                self.result_container.extend('answer', results)
            return self.result_container

        # init vars
        requests = []

        # increase number of searches
        number_of_searches += 1

        # set default useragent
        # user_agent = request.headers.get('User-Agent', '')
        user_agent = gen_useragent()

        search_query = self.search_query

        # max of all selected engine timeouts
        default_timeout = 0

        # start the search request for all selected engines
        for selected_engine in search_query.engines:
            if selected_engine['name'] not in engines:
                continue

            engine = engines[selected_engine['name']]

            if not search_query.preferences.validate_token(engine):
                continue

            # skip suspended engines
            if engine.suspend_end_time >= time():
                logger.debug('Engine currently suspended: %s', selected_engine['name'])
                continue

            # if paging is not supported, skip
            if search_query.pageno > 1 and not engine.paging:
                continue

            # if time_range is not supported, skip
            if search_query.time_range and not engine.time_range_support:
                continue

            # set default request parameters
            request_params = {}
            if not engine.offline:
                request_params = default_request_params()
                request_params['headers']['User-Agent'] = user_agent

                if hasattr(engine, 'language') and engine.language:
                    request_params['language'] = engine.language
                else:
                    request_params['language'] = search_query.lang

                request_params['safesearch'] = search_query.safesearch
                request_params['time_range'] = search_query.time_range

            request_params['category'] = selected_engine['category']
            request_params['pageno'] = search_query.pageno

            # append request to list
            requests.append((selected_engine['name'], search_query.query, request_params))

            # update default_timeout
            default_timeout = max(default_timeout, engine.timeout)

        # adjust timeout
        self.actual_timeout = default_timeout
        query_timeout = self.search_query.timeout_limit

        if max_request_timeout is None and query_timeout is None:
            # no max, no user query: use default_timeout
            pass
        elif max_request_timeout is None and query_timeout is not None:
            # no max, but user query: use the user value unless it exceeds the default
            self.actual_timeout = min(default_timeout, query_timeout)
        elif max_request_timeout is not None and query_timeout is None:
            # max, no user query: use the default unless it exceeds the max
            self.actual_timeout = min(default_timeout, max_request_timeout)
        elif max_request_timeout is not None and query_timeout is not None:
            # max and user query: use the user value unless it exceeds the max
            self.actual_timeout = min(query_timeout, max_request_timeout)

        logger.debug("actual_timeout={0} (default_timeout={1}, ?timeout_limit={2}, max_request_timeout={3})"
                     .format(self.actual_timeout, default_timeout, query_timeout, max_request_timeout))
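
        # Example (illustrative): selected engines with timeouts of 3.0 s
        # and 5.0 s give default_timeout = 5.0; a user-supplied
        # timeout_limit of 8.0 and no outgoing.max_request_timeout then
        # yield actual_timeout = min(5.0, 8.0) = 5.0 seconds.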
 
        # send all search requests
        if requests:
            search_multiple_requests(requests, self.result_container, start_time, self.actual_timeout)
            start_new_thread(gc.collect, tuple())

        # return results, suggestions, answers and infoboxes
        return self.result_container
 
class SearchWithPlugins(Search):
    """Similar to the Search class but also calls the plugins."""

    def __init__(self, search_query, ordered_plugin_list, request):
        super(SearchWithPlugins, self).__init__(search_query)
        self.ordered_plugin_list = ordered_plugin_list
        self.request = request

    def search(self):
        if plugins.call(self.ordered_plugin_list, 'pre_search', self.request, self):
            super(SearchWithPlugins, self).search()

        plugins.call(self.ordered_plugin_list, 'post_search', self.request, self)

        results = self.result_container.get_ordered_results()

        for result in results:
            plugins.call(self.ordered_plugin_list, 'on_result', self.request, self, result)

        return self.result_container
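
# Example (illustrative): the Flask webapp drives a search roughly like
# this -- `preferences` and `request` come from the HTTP layer, and
# `request.user_plugins` is assumed to hold the user's ordered plugin list:
#
#   search_query, raw_text_query = get_search_query_from_webapp(
#       preferences, request.form)
#   search = SearchWithPlugins(search_query, request.user_plugins, request)
#   result_container = search.search()
#   results = result_container.get_ordered_results()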
 
 