# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
# pyright: basic
"""Utility functions for the engines
"""
import re
import importlib
import importlib.util
import types

from typing import Optional, Union, Any, Set, List, Dict, MutableMapping, Tuple, Callable
from numbers import Number
from os.path import splitext, join
from random import choice
from html.parser import HTMLParser
from urllib.parse import urljoin, urlparse

from lxml import html
from lxml.etree import ElementBase, XPath, XPathError, XPathSyntaxError, _ElementStringResult, _ElementUnicodeResult
from babel.core import get_global

from searx import settings
from searx.data import USER_AGENTS
from searx.version import VERSION_TAG
from searx.languages import language_codes
from searx.exceptions import SearxXPathSyntaxException, SearxEngineXPathException
from searx import logger

logger = logger.getChild('utils')

XPathSpecType = Union[str, XPath]

_BLOCKED_TAGS = ('script', 'style')

_ECMA_UNESCAPE4_RE = re.compile(r'%u([0-9a-fA-F]{4})', re.UNICODE)
_ECMA_UNESCAPE2_RE = re.compile(r'%([0-9a-fA-F]{2})', re.UNICODE)

# note: the SI-named keys map to binary multiples and the IEC-named keys to
# decimal multiples; kept as-is to preserve the existing behavior documented
# in the get_torrent_size() examples below
_STORAGE_UNIT_VALUE: Dict[str, int] = {
    'TB': 1024 * 1024 * 1024 * 1024,
    'GB': 1024 * 1024 * 1024,
    'MB': 1024 * 1024,
    'TiB': 1000 * 1000 * 1000 * 1000,
    'MiB': 1000 * 1000,
    'KiB': 1000,
}

_XPATH_CACHE: Dict[str, XPath] = {}
_LANG_TO_LC_CACHE: Dict[str, Dict[str, str]] = {}


class _NotSetClass:  # pylint: disable=too-few-public-methods
    """Internal class for this module, do not create instances of this class.

    Replaces the ``None`` value so that ``None`` can still be passed explicitly
    as a function argument."""


_NOTSET = _NotSetClass()


def searx_useragent() -> str:
    """Return the searx User Agent"""
    return 'searx/{searx_version} {suffix}'.format(
        searx_version=VERSION_TAG, suffix=settings['outgoing']['useragent_suffix']
    ).strip()


def gen_useragent(os_string: Optional[str] = None) -> str:
    """Return a random browser User Agent

    See searx/data/useragents.json
    """
    return USER_AGENTS['ua'].format(os=os_string or choice(USER_AGENTS['os']), version=choice(USER_AGENTS['versions']))
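
# Illustrative sketch only (not part of the original module); the concrete
# values depend on VERSION_TAG, settings['outgoing']['useragent_suffix'] and
# the entries in searx/data/useragents.json, so everything below is hypothetical:
#
#   >>> searx_useragent()
#   'searx/1.1.0 +https://example.org'
#   >>> gen_useragent()
#   'Mozilla/5.0 (X11; Linux x86_64; rv:106.0) Gecko/20100101 Firefox/106.0'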


class _HTMLTextExtractorException(Exception):
    """Internal exception raised when the HTML is invalid"""


class _HTMLTextExtractor(HTMLParser):  # pylint: disable=W0223  # (see https://bugs.python.org/issue31844)
    """Internal class to extract text from HTML"""

    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []
        self.tags = []

    def handle_starttag(self, tag, attrs):
        self.tags.append(tag)

    def handle_endtag(self, tag):
        if not self.tags:
            return
        if tag != self.tags[-1]:
            raise _HTMLTextExtractorException()
        self.tags.pop()

    def is_valid_tag(self):
        return not self.tags or self.tags[-1] not in _BLOCKED_TAGS

    def handle_data(self, data):
        if not self.is_valid_tag():
            return
        self.result.append(data)

    def handle_charref(self, name):
        if not self.is_valid_tag():
            return
        if name[0] in ('x', 'X'):
            codepoint = int(name[1:], 16)
        else:
            codepoint = int(name)
        self.result.append(chr(codepoint))

    def handle_entityref(self, name):
        if not self.is_valid_tag():
            return
        # codepoint = htmlentitydefs.name2codepoint[name]
        # self.result.append(chr(codepoint))
        self.result.append(name)

    def get_text(self):
        return ''.join(self.result).strip()


def html_to_text(html_str: str) -> str:
    """Extract text from an HTML string

    Args:
        * html_str (str): HTML string

    Returns:
        * str: extracted text

    Examples:
        >>> html_to_text('Example <span id="42">#2</span>')
        'Example #2'
        >>> html_to_text('<style>.span { color: red; }</style><span>Example</span>')
        'Example'
    """
    html_str = html_str.replace('\n', ' ')
    html_str = ' '.join(html_str.split())
    s = _HTMLTextExtractor()
    try:
        s.feed(html_str)
    except _HTMLTextExtractorException:
        logger.debug("HTMLTextExtractor: invalid HTML\n%s", html_str)
    return s.get_text()


def extract_text(xpath_results, allow_none: bool = False) -> Optional[str]:
    """Extract text from an lxml result

    * if xpath_results is a list, extract the text from each result and concatenate them
    * if xpath_results is an xml element, extract all the text nodes from it
      ( text_content() method from lxml )
    * if xpath_results is a string element, then it's already done
    """
    if isinstance(xpath_results, list):
        # it's a list of results: concatenate everything using a recursive call
        result = ''
        for e in xpath_results:
            result = result + (extract_text(e) or '')
        return result.strip()
    if isinstance(xpath_results, ElementBase):
        # it's an element
        text: str = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False)
        text = text.strip().replace('\n', ' ')
        return ' '.join(text.split())
    if isinstance(xpath_results, (_ElementStringResult, _ElementUnicodeResult, str, Number, bool)):
        return str(xpath_results)
    if xpath_results is None and allow_none:
        return None
    if xpath_results is None and not allow_none:
        raise ValueError('extract_text(None, allow_none=False)')
    raise ValueError('unsupported type')
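
# Illustrative sketch only (not part of the original module): extract_text
# flattens an element, a string result, or a whole list of either.
#
#   >>> from lxml import html
#   >>> doc = html.fromstring('<p>Hello <b>world</b></p>')
#   >>> extract_text(doc)
#   'Hello world'
#   >>> extract_text(doc.xpath('//b/text()'))
#   'world'
#   >>> extract_text(None, allow_none=True) is None
#   True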


def normalize_url(url: str, base_url: str) -> str:
    """Normalize URL: add protocol, join URL with base_url, add trailing slash if there is no path

    Args:
        * url (str): Relative URL
        * base_url (str): Base URL, it must be an absolute URL.

    Example:
        >>> normalize_url('https://example.com', 'http://example.com/')
        'https://example.com/'
        >>> normalize_url('//example.com', 'http://example.com/')
        'http://example.com/'
        >>> normalize_url('//example.com', 'https://example.com/')
        'https://example.com/'
        >>> normalize_url('/path?a=1', 'https://example.com')
        'https://example.com/path?a=1'
        >>> normalize_url('', 'https://example.com')
        'https://example.com/'
        >>> normalize_url('/test', '/path')
        raise ValueError

    Raises:
        * ValueError

    Returns:
        * str: normalized URL
    """
    if url.startswith('//'):
        # add http or https to this kind of url //example.com/
        parsed_search_url = urlparse(base_url)
        url = '{0}:{1}'.format(parsed_search_url.scheme or 'http', url)
    elif url.startswith('/'):
        # fix relative url to the search engine
        url = urljoin(base_url, url)

    # fix relative urls that fall through the cracks
    if '://' not in url:
        url = urljoin(base_url, url)

    parsed_url = urlparse(url)

    # add a / at the end of the url if there is no path
    if not parsed_url.netloc:
        raise ValueError('Cannot parse url')
    if not parsed_url.path:
        url += '/'

    return url


def extract_url(xpath_results, base_url) -> str:
    """Extract and normalize URL from lxml Element

    Args:
        * xpath_results (Union[List[html.HtmlElement], html.HtmlElement]): lxml Element(s)
        * base_url (str): Base URL

    Example:
        >>> def f(s, search_url):
        ...     return searx.utils.extract_url(html.fromstring(s), search_url)
        >>> f('<span id="42">https://example.com</span>', 'http://example.com/')
        'https://example.com/'
        >>> f('https://example.com', 'http://example.com/')
        'https://example.com/'
        >>> f('//example.com', 'http://example.com/')
        'http://example.com/'
        >>> f('//example.com', 'https://example.com/')
        'https://example.com/'
        >>> f('/path?a=1', 'https://example.com')
        'https://example.com/path?a=1'
        >>> f('', 'https://example.com')
        raise lxml.etree.ParserError
        >>> searx.utils.extract_url([], 'https://example.com')
        raise ValueError

    Raises:
        * ValueError
        * lxml.etree.ParserError

    Returns:
        * str: normalized URL
    """
    if xpath_results == []:
        raise ValueError('Empty url resultset')

    url = extract_text(xpath_results)
    if url:
        return normalize_url(url, base_url)
    raise ValueError('URL not found')


def dict_subset(dictionary: MutableMapping, properties: Set[str]) -> Dict:
    """Extract a subset of a dict

    Examples:
        >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'C'])
        {'A': 'a', 'C': 'c'}
        >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'D'])
        {'A': 'a'}
    """
    return {k: dictionary[k] for k in properties if k in dictionary}


def get_torrent_size(filesize: str, filesize_multiplier: str) -> Optional[int]:
    """
    Args:
        * filesize (str): size
        * filesize_multiplier (str): TB, GB, ... TiB, GiB...

    Returns:
        * int: number of bytes

    Example:
        >>> get_torrent_size('5', 'GB')
        5368709120
        >>> get_torrent_size('3.14', 'MiB')
        3140000
    """
    try:
        multiplier = _STORAGE_UNIT_VALUE.get(filesize_multiplier, 1)
        return int(float(filesize) * multiplier)
    except ValueError:
        return None


def convert_str_to_int(number_str: str) -> int:
    """Convert number_str to int, or 0 if number_str is not a number."""
    if number_str.isdigit():
        return int(number_str)
    return 0


def int_or_zero(num: Union[List[str], str]) -> int:
    """Convert num to int or 0. num can be either a str or a list.
    If num is a list, the first element is converted to int (or 0 if the list is empty).
    If num is a str, see convert_str_to_int
    """
    if isinstance(num, list):
        if len(num) < 1:
            return 0
        num = num[0]
    return convert_str_to_int(num)
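
# Illustrative sketch only (not part of the original module):
#
#   >>> int_or_zero('42')
#   42
#   >>> int_or_zero(['7', '8'])
#   7
#   >>> int_or_zero([])
#   0
#   >>> int_or_zero('4.2')   # not an integer literal, so 0
#   0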


def is_valid_lang(lang) -> Optional[Tuple[bool, str, str]]:
    """Return language code and name if lang describes a language.

    Examples:
        >>> is_valid_lang('zz')
        None
        >>> is_valid_lang('uk')
        (True, 'uk', 'ukrainian')
        >>> is_valid_lang(b'uk')
        (True, 'uk', 'ukrainian')
        >>> is_valid_lang('en')
        (True, 'en', 'english')
        >>> searx.utils.is_valid_lang('Español')
        (True, 'es', 'spanish')
        >>> searx.utils.is_valid_lang('Spanish')
        (True, 'es', 'spanish')
    """
    if isinstance(lang, bytes):
        lang = lang.decode()
    is_abbr = len(lang) == 2
    lang = lang.lower()
    if is_abbr:
        for l in language_codes:
            if l[0][:2] == lang:
                return (True, l[0][:2], l[3].lower())
        return None
    for l in language_codes:
        if l[1].lower() == lang or l[3].lower() == lang:
            return (True, l[0][:2], l[3].lower())
    return None


def _get_lang_to_lc_dict(lang_list: List[str]) -> Dict[str, str]:
    key = str(lang_list)
    value = _LANG_TO_LC_CACHE.get(key, None)
    if value is None:
        value = {}
        for lang in lang_list:
            value.setdefault(lang.split('-')[0], lang)
        _LANG_TO_LC_CACHE[key] = value
    return value
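
# Illustrative sketch only (not part of the original module): the first locale
# seen for each bare language code wins, and the result is cached per lang_list.
#
#   >>> _get_lang_to_lc_dict(['en-US', 'en-GB', 'fr-FR'])
#   {'en': 'en-US', 'fr': 'fr-FR'}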


# babel's get_global contains all sorts of miscellaneous locale and territory related data
# see get_global in: https://github.com/python-babel/babel/blob/master/babel/core.py
def _get_from_babel(lang_code: str, key: str):
    match = get_global(key).get(lang_code.replace('-', '_'))
    # for some keys, such as territory_aliases, match may be a list
    if isinstance(match, str):
        return match.replace('_', '-')
    return match


def _match_language(lang_code: str, lang_list=[], custom_aliases={}) -> Optional[str]:  # pylint: disable=W0102
    """auxiliary function to match lang_code in lang_list"""
    # replace language code with a custom alias if necessary
    if lang_code in custom_aliases:
        lang_code = custom_aliases[lang_code]
    if lang_code in lang_list:
        return lang_code

    # try to get the most likely country for this language
    subtags = _get_from_babel(lang_code, 'likely_subtags')
    if subtags:
        if subtags in lang_list:
            return subtags
        subtag_parts = subtags.split('-')
        new_code = subtag_parts[0] + '-' + subtag_parts[-1]
        if new_code in custom_aliases:
            new_code = custom_aliases[new_code]
        if new_code in lang_list:
            return new_code

    # try to get any supported country for this language
    return _get_lang_to_lc_dict(lang_list).get(lang_code)


def match_language(  # pylint: disable=W0102
    locale_code, lang_list=[], custom_aliases={}, fallback: Optional[str] = 'en-US'
) -> Optional[str]:
    """get the language code from lang_list that best matches locale_code"""
    # try to get language from given locale_code
    language = _match_language(locale_code, lang_list, custom_aliases)
    if language:
        return language

    locale_parts = locale_code.split('-')
    lang_code = locale_parts[0]

    # if locale_code has a script, try matching without it
    if len(locale_parts) > 2:
        language = _match_language(lang_code + '-' + locale_parts[-1], lang_list, custom_aliases)
        if language:
            return language

    # try to get language using an equivalent country code
    if len(locale_parts) > 1:
        country_alias = _get_from_babel(locale_parts[-1], 'territory_aliases')
        if country_alias:
            language = _match_language(lang_code + '-' + country_alias[0], lang_list, custom_aliases)
            if language:
                return language

    # try to get language using an equivalent language code
    alias = _get_from_babel(lang_code, 'language_aliases')
    if alias:
        language = _match_language(alias, lang_list, custom_aliases)
        if language:
            return language

    if lang_code != locale_code:
        # try to get language from given language without giving the country
        language = _match_language(lang_code, lang_list, custom_aliases)

    return language or fallback
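
# Illustrative sketch only (not part of the original module). Exact matches are
# returned directly; the 'pt' case resolves via babel's likely subtags or the
# first locale in lang_list with a matching language subtag:
#
#   >>> match_language('es-ES', ['es-ES', 'en-US'])
#   'es-ES'
#   >>> match_language('pt', ['pt-BR', 'en-US'])
#   'pt-BR'
#   >>> match_language('zz', ['en-US'], fallback='en-US')
#   'en-US'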


def load_module(filename: str, module_dir: str) -> types.ModuleType:
    modname = splitext(filename)[0]
    modpath = join(module_dir, filename)
    # see https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
    spec = importlib.util.spec_from_file_location(modname, modpath)
    if not spec:
        raise ValueError(f"Error loading '{modpath}' module")
    module = importlib.util.module_from_spec(spec)
    if not spec.loader:
        raise ValueError(f"Error loading '{modpath}' module")
    spec.loader.exec_module(module)
    return module
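
# Illustrative sketch only; the engine file name and directory are hypothetical:
#
#   >>> mod = load_module('my_engine.py', '/usr/local/searx/searx/engines')
#   >>> mod.__name__
#   'my_engine'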


def to_string(obj: Any) -> str:
    """Convert obj to its string representation."""
    if isinstance(obj, str):
        return obj
    if hasattr(obj, '__str__'):
        return obj.__str__()
    return repr(obj)


def ecma_unescape(string: str) -> str:
    """Python implementation of the JavaScript unescape function

    https://www.ecma-international.org/ecma-262/6.0/#sec-unescape-string
    https://developer.mozilla.org/fr/docs/Web/JavaScript/Reference/Objets_globaux/unescape

    Examples:
        >>> ecma_unescape('%u5409')
        '吉'
        >>> ecma_unescape('%20')
        ' '
        >>> ecma_unescape('%F3')
        'ó'
    """
    # "%u5409" becomes "吉"
    string = _ECMA_UNESCAPE4_RE.sub(lambda e: chr(int(e.group(1), 16)), string)
    # "%20" becomes " ", "%F3" becomes "ó"
    string = _ECMA_UNESCAPE2_RE.sub(lambda e: chr(int(e.group(1), 16)), string)
    return string


def get_string_replaces_function(replaces: Dict[str, str]) -> Callable[[str], str]:
    rep = {re.escape(k): v for k, v in replaces.items()}
    pattern = re.compile("|".join(rep.keys()))

    def func(text):
        return pattern.sub(lambda m: rep[re.escape(m.group(0))], text)

    return func
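
# Illustrative sketch only (not part of the original module): build one
# compiled pattern that applies several literal replacements in a single pass.
#
#   >>> clean = get_string_replaces_function({'&#39;': "'", '\r\n': '\n'})
#   >>> clean('it&#39;s a test\r\n')
#   "it's a test\n"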


def get_engine_from_settings(name: str) -> Dict:
    """Return the engine configuration from settings.yml for a given engine name"""

    if 'engines' not in settings:
        return {}

    for engine in settings['engines']:
        if 'name' not in engine:
            continue
        if name == engine['name']:
            return engine

    return {}


def get_xpath(xpath_spec: XPathSpecType) -> XPath:
    """Return a cached compiled XPath

    There is no thread lock.
    Worst case scenario, xpath_str is compiled more than once.

    Args:
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath

    Returns:
        * result (lxml.etree.XPath): compiled XPath

    Raises:
        * TypeError: Raised when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raised when there is a syntax error in the XPath
    """
    if isinstance(xpath_spec, str):
        result = _XPATH_CACHE.get(xpath_spec, None)
        if result is None:
            try:
                result = XPath(xpath_spec)
            except XPathSyntaxError as e:
                raise SearxXPathSyntaxException(xpath_spec, str(e.msg)) from e
            _XPATH_CACHE[xpath_spec] = result
        return result
    if isinstance(xpath_spec, XPath):
        return xpath_spec
    raise TypeError('xpath_spec must be either a str or a lxml.etree.XPath')


def eval_xpath(element: ElementBase, xpath_spec: XPathSpecType):
    """Equivalent of element.xpath(xpath_str) but compile xpath_str once for all.
    See https://lxml.de/xpathxslt.html#xpath-return-values

    Args:
        * element (ElementBase): lxml element to apply the XPath on
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath

    Returns:
        * result (bool, float, list, str): Results.

    Raises:
        * TypeError: Raised when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raised when there is a syntax error in the XPath
        * SearxEngineXPathException: Raised when the XPath can't be evaluated.
    """
    xpath = get_xpath(xpath_spec)
    try:
        return xpath(element)
    except XPathError as e:
        arg = ' '.join([str(i) for i in e.args])
        raise SearxEngineXPathException(xpath_spec, arg) from e
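
# Illustrative sketch only (not part of the original module): repeated calls
# with the same XPath string reuse the compiled object cached in _XPATH_CACHE.
#
#   >>> from lxml import html
#   >>> doc = html.fromstring('<div><a href="/a">a</a><a href="/b">b</a></div>')
#   >>> eval_xpath(doc, '//a/@href')
#   ['/a', '/b']
#   >>> eval_xpath(doc, 'count(//a)')
#   2.0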


def eval_xpath_list(element: ElementBase, xpath_spec: XPathSpecType, min_len: Optional[int] = None):
    """Same as eval_xpath, check if the result is a list

    Args:
        * element (ElementBase): lxml element to apply the XPath on
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath
        * min_len (int, optional): minimum length of the result list. Defaults to None.

    Raises:
        * TypeError: Raised when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raised when there is a syntax error in the XPath
        * SearxEngineXPathException: Raised if the result is not a list

    Returns:
        * result (list): Results.
    """
    result = eval_xpath(element, xpath_spec)
    if not isinstance(result, list):
        raise SearxEngineXPathException(xpath_spec, 'the result is not a list')
    if min_len is not None and min_len > len(result):
        raise SearxEngineXPathException(xpath_spec, 'len(xpath_str) < ' + str(min_len))
    return result


def eval_xpath_getindex(elements: ElementBase, xpath_spec: XPathSpecType, index: int, default=_NOTSET):
    """Call eval_xpath_list then get one element using the index parameter.
    If the index does not exist, raise an exception if default is not set,
    otherwise return the default value (which can be None).

    Args:
        * elements (ElementBase): lxml element to apply the XPath on.
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath.
        * index (int): index to get
        * default (Object, optional): default value if the index doesn't exist.

    Raises:
        * TypeError: Raised when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raised when there is a syntax error in the XPath
        * SearxEngineXPathException: Raised if the index is not found. Also see eval_xpath.

    Returns:
        * result (bool, float, list, str): Results.
    """
    result = eval_xpath_list(elements, xpath_spec)
    if -len(result) <= index < len(result):
        return result[index]
    if default is _NOTSET:
        # raise a SearxEngineXPathException instead of an IndexError
        # to record xpath_spec
        raise SearxEngineXPathException(xpath_spec, 'index ' + str(index) + ' not found')
    return default
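
# Illustrative sketch only (not part of the original module):
#
#   >>> from lxml import html
#   >>> doc = html.fromstring('<ul><li>a</li><li>b</li></ul>')
#   >>> extract_text(eval_xpath_getindex(doc, '//li', 0))
#   'a'
#   >>> eval_xpath_getindex(doc, '//li', 5, default=None) is None
#   True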