# SPDX-License-Identifier: AGPL-3.0-or-later

import typing
import math
import contextlib
from timeit import default_timer
from operator import itemgetter

from searx.engines import engines
from .models import HistogramStorage, CounterStorage
from .error_recorder import count_error, count_exception, errors_per_engines

__all__ = [
    "initialize",
    "get_engines_stats", "get_engine_errors",
    "histogram", "histogram_observe", "histogram_observe_time",
    "counter", "counter_inc", "counter_add",
    "count_error", "count_exception",
]

ENDPOINTS = {'search'}

histogram_storage: typing.Optional[HistogramStorage] = None
counter_storage: typing.Optional[CounterStorage] = None

@contextlib.contextmanager
def histogram_observe_time(*args):
    # Time the wrapped block and record its duration in the histogram named by *args.
    h = histogram_storage.get(*args)
    before = default_timer()
    yield before
    duration = default_timer() - before
    if h:
        h.observe(duration)
    else:
        raise ValueError("histogram " + repr((*args,)) + " doesn't exist")

def histogram_observe(duration, *args):
    # Record an externally measured duration in the histogram named by *args.
    histogram_storage.get(*args).observe(duration)


def histogram(*args, raise_on_not_found=True):
    h = histogram_storage.get(*args)
    if raise_on_not_found and h is None:
        raise ValueError("histogram " + repr((*args,)) + " doesn't exist")
    return h

def counter_inc(*args):
    counter_storage.add(1, *args)


def counter_add(value, *args):
    counter_storage.add(value, *args)


def counter(*args):
    return counter_storage.get(*args)

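# Illustrative usage sketch: the key names 'example' and 'ping' are
# hypothetical, and the counter / histogram is assumed to need configuring
# first (initialize() below does this for the real engine metrics).
#
#     counter_storage.configure('example', 'count')
#     counter_inc('example', 'count')                  # counter('example', 'count') == 1
#
#     histogram_storage.configure(0.1, 30, 'example', 'ping')
#     histogram_observe(0.25, 'example', 'ping')       # record an externally timed 0.25 s
#     with histogram_observe_time('example', 'ping'):  # or time a block of code directly
#         do_something()                               # do_something() is a placeholder
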
def initialize(engine_names=None):
    """
    Initialize metrics
    """
    global counter_storage, histogram_storage

    counter_storage = CounterStorage()
    histogram_storage = HistogramStorage()

    # max_timeout = max of all the engine.timeout
    max_timeout = 2
    for engine_name in (engine_names or engines):
        if engine_name in engines:
            max_timeout = max(max_timeout, engines[engine_name].timeout)

    # histogram configuration
    histogram_width = 0.1
    histogram_size = int(1.5 * max_timeout / histogram_width)

    # engines
    for engine_name in (engine_names or engines):
        # search count
        counter_storage.configure('engine', engine_name, 'search', 'count', 'sent')
        counter_storage.configure('engine', engine_name, 'search', 'count', 'successful')
        # global counter of errors
        counter_storage.configure('engine', engine_name, 'search', 'count', 'error')
        # score of the engine
        counter_storage.configure('engine', engine_name, 'score')
        # result count per request
        histogram_storage.configure(1, 100, 'engine', engine_name, 'result', 'count')
        # time doing HTTP requests
        histogram_storage.configure(histogram_width, histogram_size, 'engine', engine_name, 'time', 'http')
        # total time
        # .time.request and ...response times may overlap .time.http time.
        histogram_storage.configure(histogram_width, histogram_size, 'engine', engine_name, 'time', 'total')

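# Note on the sizing above (illustrative): with the default max_timeout of 2
# (no configured engine exceeding it), histogram_width = 0.1 and
# histogram_size = int(1.5 * 2 / 0.1) = 30, so each per-engine time histogram
# covers roughly 0 to 3 seconds in 0.1 s buckets. A hypothetical call:
#
#     initialize(['duckduckgo', 'wikipedia'])   # engine names are examples only
#     counter_inc('engine', 'duckduckgo', 'search', 'count', 'sent')
#     histogram_observe(0.42, 'engine', 'duckduckgo', 'time', 'total')
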
def get_engine_errors(engine_list):
    result = {}
    engine_names = list(errors_per_engines.keys())
    engine_names.sort()
    for engine_name in engine_names:
        if engine_name not in engine_list:
            continue
        error_stats = errors_per_engines[engine_name]
        sent_search_count = max(counter('engine', engine_name, 'search', 'count', 'sent'), 1)
        sorted_context_count_list = sorted(error_stats.items(), key=lambda context_count: context_count[1])
        r = []
        for context, count in sorted_context_count_list:
            percentage = round(20 * count / sent_search_count) * 5
            r.append({
                'filename': context.filename,
                'function': context.function,
                'line_no': context.line_no,
                'code': context.code,
                'exception_classname': context.exception_classname,
                'log_message': context.log_message,
                'log_parameters': context.log_parameters,
                'secondary': context.secondary,
                'percentage': percentage,
            })
        result[engine_name] = sorted(r, reverse=True, key=lambda d: d['percentage'])
    return result

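# Worked example for the 'percentage' value computed in get_engine_errors
# above: with 3 occurrences of an error context out of 25 sent requests,
# round(20 * 3 / 25) * 5 == round(2.4) * 5 == 10, i.e. a 12 % error rate is
# reported as 10 %, the nearest multiple of 5.
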
def to_percentage(stats, maxvalue):
    for engine_stat in stats:
        if maxvalue:
            engine_stat['percentage'] = int(engine_stat['avg'] / maxvalue * 100)
        else:
            engine_stat['percentage'] = 0
    return stats

def get_engines_stats(engine_list):
    global counter_storage, histogram_storage

    assert counter_storage is not None
    assert histogram_storage is not None

    list_time = []
    list_time_http = []
    list_time_total = []
    list_result_count = []
    list_error_count = []
    list_scores = []
    list_scores_per_result = []

    max_error_count = max_http_time = max_time_total = max_result_count = max_score = None  # noqa
    for engine_name in engine_list:
        error_count = counter('engine', engine_name, 'search', 'count', 'error')

        if counter('engine', engine_name, 'search', 'count', 'sent') > 0:
            list_error_count.append({'avg': error_count, 'name': engine_name})
            max_error_count = max(error_count, max_error_count or 0)

        successful_count = counter('engine', engine_name, 'search', 'count', 'successful')
        if successful_count == 0:
            continue

        result_count_sum = histogram('engine', engine_name, 'result', 'count').sum
        time_total = histogram('engine', engine_name, 'time', 'total').percentage(50)
        time_http = histogram('engine', engine_name, 'time', 'http').percentage(50)
        result_count = result_count_sum / float(successful_count)

        if result_count:
            score = counter('engine', engine_name, 'score')  # noqa
            score_per_result = score / float(result_count_sum)
        else:
            score = score_per_result = 0.0

        max_time_total = max(time_total, max_time_total or 0)
        max_http_time = max(time_http, max_http_time or 0)
        max_result_count = max(result_count, max_result_count or 0)
        max_score = max(score, max_score or 0)

        list_time.append({'total': round(time_total, 1),
                          'http': round(time_http, 1),
                          'name': engine_name,
                          'processing': round(time_total - time_http, 1)})
        list_time_total.append({'avg': time_total, 'name': engine_name})
        list_time_http.append({'avg': time_http, 'name': engine_name})
        list_result_count.append({'avg': result_count, 'name': engine_name})
        list_scores.append({'avg': score, 'name': engine_name})
        list_scores_per_result.append({'avg': score_per_result, 'name': engine_name})

    list_time = sorted(list_time, key=itemgetter('total'))
    list_time_total = sorted(to_percentage(list_time_total, max_time_total), key=itemgetter('avg'))
    list_time_http = sorted(to_percentage(list_time_http, max_http_time), key=itemgetter('avg'))
    list_result_count = sorted(to_percentage(list_result_count, max_result_count), key=itemgetter('avg'), reverse=True)
    list_scores = sorted(list_scores, key=itemgetter('avg'), reverse=True)
    list_scores_per_result = sorted(list_scores_per_result, key=itemgetter('avg'), reverse=True)
    list_error_count = sorted(to_percentage(list_error_count, max_error_count), key=itemgetter('avg'), reverse=True)

    return {
        'time': list_time,
        'max_time': math.ceil(max_time_total or 0),
        'time_total': list_time_total,
        'time_http': list_time_http,
        'result_count': list_result_count,
        'scores': list_scores,
        'scores_per_result': list_scores_per_result,
        'error_count': list_error_count,
    }
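
# Illustrative shape of the mapping returned by get_engines_stats (engine name
# and numbers are made up; each 'time' entry carries the median total, HTTP and
# derived processing time, while the other lists carry 'avg' values):
#
#     {
#         'time': [{'name': 'example engine', 'total': 0.7, 'http': 0.5,
#                   'processing': 0.2}],
#         'max_time': 1,
#         'time_total': [{'avg': 0.7, 'name': 'example engine', 'percentage': 100}],
#         'time_http': [{'avg': 0.5, 'name': 'example engine', 'percentage': 100}],
#         'result_count': [{'avg': 12.0, 'name': 'example engine', 'percentage': 100}],
#         'scores': [{'avg': 3.5, 'name': 'example engine'}],
#         'scores_per_result': [{'avg': 0.29, 'name': 'example engine'}],
#         'error_count': [{'avg': 1, 'name': 'example engine', 'percentage': 100}],
#     }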