# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
# pylint: disable=missing-module-docstring

import typing
import math
import contextlib
from timeit import default_timer
from operator import itemgetter

from searx.engines import engines
from .models import HistogramStorage, CounterStorage
from .error_recorder import count_error, count_exception, errors_per_engines

__all__ = [
    "initialize",
    "get_engines_stats",
    "get_engine_errors",
    "histogram",
    "histogram_observe",
    "histogram_observe_time",
    "counter",
    "counter_inc",
    "counter_add",
    "count_error",
    "count_exception",
]

ENDPOINTS = {'search'}

histogram_storage: typing.Optional[HistogramStorage] = None
counter_storage: typing.Optional[CounterStorage] = None
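
# Both storages stay None until initialize() is called; the helpers below use them
# without a None check, so calling e.g. counter_inc() before initialize() fails with
# an AttributeError on NoneType.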


@contextlib.contextmanager
def histogram_observe_time(*args):
    h = histogram_storage.get(*args)
    before = default_timer()
    yield before
    duration = default_timer() - before
    if h:
        h.observe(duration)
    else:
        raise ValueError("histogram " + repr((*args,)) + " doesn't exist")


def histogram_observe(duration, *args):
    histogram_storage.get(*args).observe(duration)


def histogram(*args, raise_on_not_found=True):
    h = histogram_storage.get(*args)
    if raise_on_not_found and h is None:
        raise ValueError("histogram " + repr((*args,)) + " doesn't exist")
    return h


def counter_inc(*args):
    counter_storage.add(1, *args)


def counter_add(value, *args):
    counter_storage.add(value, *args)


def counter(*args):
    return counter_storage.get(*args)
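

# Illustration only: a minimal sketch of how a caller could feed these helpers for a
# single engine search.  The engine name 'example' and all numbers are made up; the
# key tuples mirror the ones configured by initialize() below, which must have run
# first (otherwise histogram_observe_time() raises ValueError).
def _example_record_search():
    counter_inc('engine', 'example', 'search', 'count', 'sent')
    with histogram_observe_time('engine', 'example', 'time', 'total'):
        pass  # the actual request and result parsing would happen here
    histogram_observe(0.2, 'engine', 'example', 'time', 'http')    # seconds spent on HTTP
    histogram_observe(10, 'engine', 'example', 'result', 'count')  # number of results returned
    counter_inc('engine', 'example', 'search', 'count', 'successful')
    counter_add(25, 'engine', 'example', 'score')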


def initialize(engine_names=None):
    """
    Initialize metrics
    """
    global counter_storage, histogram_storage  # pylint: disable=global-statement

    counter_storage = CounterStorage()
    histogram_storage = HistogramStorage()

    # max_timeout = max of all the engine.timeout
    max_timeout = 2
    for engine_name in engine_names or engines:
        if engine_name in engines:
            max_timeout = max(max_timeout, engines[engine_name].timeout)

    # histogram configuration
    histogram_width = 0.1
    histogram_size = int(1.5 * max_timeout / histogram_width)

    # engines
    for engine_name in engine_names or engines:
        # search count
        counter_storage.configure('engine', engine_name, 'search', 'count', 'sent')
        counter_storage.configure('engine', engine_name, 'search', 'count', 'successful')
        # global counter of errors
        counter_storage.configure('engine', engine_name, 'search', 'count', 'error')
        # score of the engine
        counter_storage.configure('engine', engine_name, 'score')
        # result count per request
        histogram_storage.configure(1, 100, 'engine', engine_name, 'result', 'count')
        # time doing HTTP requests
        histogram_storage.configure(histogram_width, histogram_size, 'engine', engine_name, 'time', 'http')
        # total time
        # .time.request and ...response times may overlap .time.http time.
        histogram_storage.configure(histogram_width, histogram_size, 'engine', engine_name, 'time', 'total')
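
# For reference, the configure() calls above lay out the following per-engine keys
# (written dotted here; the storages are keyed by the variadic argument tuples):
#   counters:   engine.<name>.search.count.sent / .successful / .error
#               engine.<name>.score
#   histograms: engine.<name>.result.count            (bucket width 1, 100 buckets)
#               engine.<name>.time.http, .time.total  (bucket width 0.1 s, sized from
#                                                       the largest engine timeout)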


def get_engine_errors(engine_name_list):
    result = {}
    engine_names = list(errors_per_engines.keys())
    engine_names.sort()

    for engine_name in engine_names:
        if engine_name not in engine_name_list:
            continue

        error_stats = errors_per_engines[engine_name]
        sent_search_count = max(counter('engine', engine_name, 'search', 'count', 'sent'), 1)
        sorted_context_count_list = sorted(error_stats.items(), key=lambda context_count: context_count[1])
        r = []
        for context, count in sorted_context_count_list:
            percentage = round(20 * count / sent_search_count) * 5
            r.append(
                {
                    'filename': context.filename,
                    'function': context.function,
                    'line_no': context.line_no,
                    'code': context.code,
                    'exception_classname': context.exception_classname,
                    'log_message': context.log_message,
                    'log_parameters': context.log_parameters,
                    'secondary': context.secondary,
                    'percentage': percentage,
                }
            )
        result[engine_name] = sorted(r, reverse=True, key=lambda d: d['percentage'])
    return result
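
# Worked example for the percentage above: values are snapped to steps of 5.  With
# 100 searches sent, 7 occurrences of an error context give round(20 * 7 / 100) * 5
# = round(1.4) * 5 = 5, while 8 occurrences give round(1.6) * 5 = 10.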


def get_reliabilities(engine_name_list, checker_results):
    reliabilities = {}

    engine_errors = get_engine_errors(engine_name_list)

    for engine_name in engine_name_list:
        checker_result = checker_results.get(engine_name, {})
        checker_success = checker_result.get('success', True)
        errors = engine_errors.get(engine_name) or []
        if counter('engine', engine_name, 'search', 'count', 'sent') == 0:
            # no request
            reliability = None
        elif checker_success and not errors:
            reliability = 100
        elif 'simple' in checker_result.get('errors', {}):
            # the basic (simple) test doesn't work: the engine is broken according to the checker
            # even if there is no exception
            reliability = 0
        else:
            reliability = 100 - sum([error['percentage'] for error in errors if not error.get('secondary')])

        reliabilities[engine_name] = {
            'reliability': reliability,
            'errors': errors,
            'checker': checker_results.get(engine_name, {}).get('errors', {}),
        }
    return reliabilities
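
# Worked example: with searches sent, no failing 'simple' checker test, one primary
# error bucket at 10% and one secondary bucket at 5%, the final branch applies and
# the reliability is 100 - 10 = 90; secondary errors are not subtracted.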


def get_engines_stats(engine_name_list):
    assert counter_storage is not None
    assert histogram_storage is not None

    list_time = []
    max_time_total = max_result_count = None

    for engine_name in engine_name_list:
        sent_count = counter('engine', engine_name, 'search', 'count', 'sent')
        if sent_count == 0:
            continue

        result_count = histogram('engine', engine_name, 'result', 'count').percentage(50)
        result_count_sum = histogram('engine', engine_name, 'result', 'count').sum
        successful_count = counter('engine', engine_name, 'search', 'count', 'successful')
        time_total = histogram('engine', engine_name, 'time', 'total').percentage(50)

        max_time_total = max(time_total or 0, max_time_total or 0)
        max_result_count = max(result_count or 0, max_result_count or 0)

        stats = {
            'name': engine_name,
            'total': None,
            'total_p80': None,
            'total_p95': None,
            'http': None,
            'http_p80': None,
            'http_p95': None,
            'processing': None,
            'processing_p80': None,
            'processing_p95': None,
            'score': 0,
            'score_per_result': 0,
            'result_count': result_count,
        }

        if successful_count and result_count_sum:
            score = counter('engine', engine_name, 'score')
            stats['score'] = score
            stats['score_per_result'] = score / float(result_count_sum)

        time_http = histogram('engine', engine_name, 'time', 'http').percentage(50)
        time_http_p80 = time_http_p95 = 0

        if time_http is not None:
            time_http_p80 = histogram('engine', engine_name, 'time', 'http').percentage(80)
            time_http_p95 = histogram('engine', engine_name, 'time', 'http').percentage(95)
            stats['http'] = round(time_http, 1)
            stats['http_p80'] = round(time_http_p80, 1)
            stats['http_p95'] = round(time_http_p95, 1)

        if time_total is not None:
            time_total_p80 = histogram('engine', engine_name, 'time', 'total').percentage(80)
            time_total_p95 = histogram('engine', engine_name, 'time', 'total').percentage(95)
            stats['total'] = round(time_total, 1)
            stats['total_p80'] = round(time_total_p80, 1)
            stats['total_p95'] = round(time_total_p95, 1)
            stats['processing'] = round(time_total - (time_http or 0), 1)
            stats['processing_p80'] = round(time_total_p80 - time_http_p80, 1)
            stats['processing_p95'] = round(time_total_p95 - time_http_p95, 1)

        list_time.append(stats)

    return {
        'time': list_time,
        'max_time': math.ceil(max_time_total or 0),
        'max_result_count': math.ceil(max_result_count or 0),
    }
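

# Illustration only: a self-contained round trip through the module, using a made-up
# engine name and made-up observations; the exact numbers in the returned stats depend
# on the HistogramStorage/CounterStorage models, which are defined elsewhere.
def _example_stats_roundtrip():
    initialize(['example'])
    counter_inc('engine', 'example', 'search', 'count', 'sent')
    counter_inc('engine', 'example', 'search', 'count', 'successful')
    counter_add(30, 'engine', 'example', 'score')
    histogram_observe(0.1, 'engine', 'example', 'time', 'http')
    histogram_observe(0.3, 'engine', 'example', 'time', 'total')
    histogram_observe(15, 'engine', 'example', 'result', 'count')
    return get_engines_stats(['example'])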