#!/usr/bin/env python
# lint: pylint
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Fetch website descriptions from the websites themselves and from the
:origin:`searx/engines/wikidata.py` engine.

Output file: :origin:`searx/data/engine_descriptions.json`.
"""

# pylint: disable=invalid-name, global-statement
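
# The resulting JSON maps each SearXNG UI locale to the engine descriptions
# available in that locale (see get_output() below for the exact value
# formats).  A minimal, made-up excerpt for illustration only:
#
#   {
#    "en": {
#     "some engine": ["An example description", "https://example.org/"],
#     "another engine": "Description taken from Wikipedia ..."
#    }
#   }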

import json
from urllib.parse import urlparse
from os.path import join

from lxml.html import fromstring

from searx.engines import wikidata, set_loggers
from searx.utils import extract_text, searx_useragent
from searx.locales import LOCALE_NAMES, locales_initialize, match_locale
from searx import searx_dir
from searx.utils import gen_useragent, detect_language
import searx.search
import searx.network

set_loggers(wikidata, 'wikidata')
locales_initialize()

# You can run the query manually at https://query.wikidata.org
# Replace %IDS% with Wikidata entities separated by spaces, each prefixed with wd:
# for example: wd:Q182496 wd:Q1540899
# Replace %LANGUAGES_SPARQL% with a comma-separated list of quoted language codes
SPARQL_WIKIPEDIA_ARTICLE = """
SELECT DISTINCT ?item ?name ?article ?lang
WHERE {
  hint:Query hint:optimizer "None".
  VALUES ?item { %IDS% }
  ?article schema:about ?item ;
      schema:inLanguage ?lang ;
      schema:name ?name ;
      schema:isPartOf [ wikibase:wikiGroup "wikipedia" ] .
  FILTER(?lang in (%LANGUAGES_SPARQL%)) .
  FILTER (!CONTAINS(?name, ':')) .
}
ORDER BY ?item ?lang
"""

SPARQL_DESCRIPTION = """
SELECT DISTINCT ?item ?itemDescription
WHERE {
  VALUES ?item { %IDS% }
  ?item schema:description ?itemDescription .
  FILTER (lang(?itemDescription) in (%LANGUAGES_SPARQL%))
}
ORDER BY ?itemLang
"""

NOT_A_DESCRIPTION = [
    'web site',
    'site web',
    'komputa serĉilo',
    'interreta serĉilo',
    'bilaketa motor',
    'web search engine',
    'wikimedia täpsustuslehekülg',
]

SKIP_ENGINE_SOURCE = [
    # fmt: off
    ('gitlab', 'wikidata')  # descriptions are about wikipedia disambiguation pages
    # fmt: on
]

WIKIPEDIA_LANGUAGES = {}
LANGUAGES_SPARQL = ''
IDS = None
WIKIPEDIA_LANGUAGE_VARIANTS = {'zh_Hant': 'zh-tw'}

descriptions = {}
wd_to_engine_name = {}


def normalize_description(description):
    # replace ASCII control characters by spaces and collapse consecutive whitespace
    for c in [chr(c) for c in range(0, 31)]:
        description = description.replace(c, ' ')
    description = ' '.join(description.strip().split())
    return description


def update_description(engine_name, lang, description, source, replace=True):
    if not isinstance(description, str):
        return
    description = normalize_description(description)
    if description.lower() == engine_name.lower():
        return
    if description.lower() in NOT_A_DESCRIPTION:
        return
    if (engine_name, source) in SKIP_ENGINE_SOURCE:
        return
    if ' ' not in description:
        # skip single-word descriptions (like "website")
        return
    if replace or lang not in descriptions[engine_name]:
        descriptions[engine_name][lang] = [description, source]


def get_wikipedia_summary(wikipedia_url, searxng_locale):
    # get the REST API URL from the HTML URL
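    #
    # For example (the URL mentioned in fetch_wikipedia_descriptions below):
    #   https://de.wikipedia.org/wiki/PubMed
    #   --> https://de.wikipedia.org/api/rest_v1/page/summary/PubMed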

    # Headers
    headers = {'User-Agent': searx_useragent()}

    if searxng_locale in WIKIPEDIA_LANGUAGE_VARIANTS:
        headers['Accept-Language'] = WIKIPEDIA_LANGUAGE_VARIANTS.get(searxng_locale)

    # URL path: from HTML URL to REST API URL
    parsed_url = urlparse(wikipedia_url)
    # remove the /wiki/ prefix
    article_name = parsed_url.path.split('/wiki/')[1]
    # article_name is already percent-encoded, but '/' must also be encoded for the REST API call
    encoded_article_name = article_name.replace('/', '%2F')
    path = '/api/rest_v1/page/summary/' + encoded_article_name
    wikipedia_rest_url = parsed_url._replace(path=path).geturl()
    try:
        response = searx.network.get(wikipedia_rest_url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception as e:  # pylint: disable=broad-except
        print("    ", wikipedia_url, e)
        return None
    api_result = json.loads(response.text)
    return api_result.get('extract')


def get_website_description(url, lang1, lang2=None):
    # browser-like headers, so that websites return their normal HTML front page
    headers = {
        'User-Agent': gen_useragent(),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'DNT': '1',
        'Upgrade-Insecure-Requests': '1',
        'Sec-GPC': '1',
        'Cache-Control': 'max-age=0',
    }
    if lang1 is not None:
        lang_list = [lang1]
        if lang2 is not None:
            lang_list.append(lang2)
        headers['Accept-Language'] = f'{",".join(lang_list)};q=0.8'
    try:
        response = searx.network.get(url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception:  # pylint: disable=broad-except
        return (None, None)

    try:
        html = fromstring(response.text)
    except ValueError:
        html = fromstring(response.content)

    description = extract_text(html.xpath('/html/head/meta[@name="description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/meta[@property="og:description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/title'))

    # prefer the language detected from the description, then the declared page
    # language, then the requested language
    lang = extract_text(html.xpath('/html/@lang'))
    if not lang and lang1:
        lang = lang1
    lang = detect_language(description) or lang or 'en'
    lang = lang.split('_')[0]
    lang = lang.split('-')[0]
    return (lang, description)


def initialize():
    global IDS, LANGUAGES_SPARQL
    searx.search.initialize()
    wikipedia_engine = searx.engines.engines['wikipedia']

    locale2lang = {'nl-BE': 'nl'}
    for sxng_ui_lang in LOCALE_NAMES:

        sxng_ui_alias = locale2lang.get(sxng_ui_lang, sxng_ui_lang)
        wiki_lang = None

        if sxng_ui_alias in wikipedia_engine.traits.custom['WIKIPEDIA_LANGUAGES']:
            wiki_lang = sxng_ui_alias
        if not wiki_lang:
            wiki_lang = wikipedia_engine.traits.get_language(sxng_ui_alias)
        if not wiki_lang:
            print(f"WIKIPEDIA_LANGUAGES missing {sxng_ui_lang}")
            continue

        WIKIPEDIA_LANGUAGES[sxng_ui_lang] = wiki_lang

    LANGUAGES_SPARQL = ', '.join(f"'{l}'" for l in set(WIKIPEDIA_LANGUAGES.values()))

    for engine_name, engine in searx.engines.engines.items():
        descriptions[engine_name] = {}
        wikidata_id = getattr(engine, "about", {}).get('wikidata_id')
        if wikidata_id is not None:
            wd_to_engine_name.setdefault(wikidata_id, set()).add(engine_name)

    IDS = ' '.join(list(map(lambda wd_id: 'wd:' + wd_id, wd_to_engine_name.keys())))


def fetch_wikidata_descriptions():
    print('Fetching wikidata descriptions')
    searx.network.set_timeout_for_thread(60)
    result = wikidata.send_wikidata_query(
        SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            wikidata_lang = binding['itemDescription']['xml:lang']
            desc = binding['itemDescription']['value']
            for engine_name in wd_to_engine_name[wikidata_id]:
                for searxng_locale in LOCALE_NAMES:
                    if WIKIPEDIA_LANGUAGES[searxng_locale] != wikidata_lang:
                        continue
                    print(
                        f"    engine: {engine_name:20} / wikidata_lang: {wikidata_lang:5}",
                        f"/ len(wikidata_desc): {len(desc)}",
                    )
                    update_description(engine_name, searxng_locale, desc, 'wikidata')


def fetch_wikipedia_descriptions():
    print('Fetching wikipedia descriptions')
    result = wikidata.send_wikidata_query(
        SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            wikidata_lang = binding['name']['xml:lang']
            wikipedia_url = binding['article']['value']  # for example the URL https://de.wikipedia.org/wiki/PubMed
            for engine_name in wd_to_engine_name[wikidata_id]:
                for searxng_locale in LOCALE_NAMES:
                    if WIKIPEDIA_LANGUAGES[searxng_locale] != wikidata_lang:
                        continue
                    desc = get_wikipedia_summary(wikipedia_url, searxng_locale)
                    if not desc:
                        continue
                    print(
                        f"    engine: {engine_name:20} / wikidata_lang: {wikidata_lang:5}",
                        f"/ len(wikipedia_desc): {len(desc)}",
                    )
                    update_description(engine_name, searxng_locale, desc, 'wikipedia')


def normalize_url(url):
    # reduce a search_url / base_url to the engine's front page URL
    url = url.replace('{language}', 'en')
    url = urlparse(url)._replace(path='/', params='', query='', fragment='').geturl()
    url = url.replace('https://api.', 'https://')
    return url


def fetch_website_description(engine_name, website):
    print(f"- fetch website descr: {engine_name} / {website}")
    default_lang, default_description = get_website_description(website, None, None)

    if default_lang is None or default_description is None:
        # the front page can't be fetched: skip this engine
        return

    # language list, ordered so the most common languages come first
    languages = ['en', 'es', 'pt', 'ru', 'tr', 'fr']
    languages = languages + [l for l in LOCALE_NAMES if l not in languages]

    previous_matched_lang = None
    previous_count = 0

    for lang in languages:

        if lang in descriptions[engine_name]:
            continue

        fetched_lang, desc = get_website_description(website, lang, WIKIPEDIA_LANGUAGES[lang])
        if fetched_lang is None or desc is None:
            continue

        # check whether desc changes with the different Accept-Language values
        if fetched_lang == previous_matched_lang:
            previous_count += 1
            if previous_count == 6:
                # the website returned the same description for 6 different
                # languages in the Accept-Language header: stop now
                break
        else:
            previous_matched_lang = fetched_lang
            previous_count = 0

        # Don't blindly trust the value of fetched_lang, some websites return
        # inappropriate values, for example bing-images::
        #
        #   requested lang: zh-Hans-CN / fetched lang: ceb / desc: 查看根据您的兴趣量身定制的提要
        #
        # The lang "ceb" is Cebuano, but the description is given in zh-Hans-CN

        print(
            f"    engine: {engine_name:20} / requested lang:{lang:7}"
            f" / fetched lang: {fetched_lang:7} / len(desc): {len(desc)}"
        )

        matched_lang = match_locale(fetched_lang, LOCALE_NAMES.keys(), fallback=lang)
        update_description(engine_name, matched_lang, desc, website, replace=False)


def fetch_website_descriptions():
    print('Fetching website descriptions')
    for engine_name, engine in searx.engines.engines.items():
        website = getattr(engine, "about", {}).get('website')
        if website is None and hasattr(engine, "search_url"):
            website = normalize_url(getattr(engine, "search_url"))
        if website is None and hasattr(engine, "base_url"):
            website = normalize_url(getattr(engine, "base_url"))
        if website is not None:
            fetch_website_description(engine_name, website)


def get_engine_descriptions_filename():
    return join(join(searx_dir, "data"), "engine_descriptions.json")


def get_output():
    """
    Transform

        descriptions[engine][language] = [description, source]

    into

        output[language][engine] = description_and_source

    where description_and_source can be:

    * [description, source]
    * description (if source == "wikipedia")
    * [f"{engine}:{lang}", "ref"] (a reference to another, identical description)
    """
    output = {locale: {} for locale in LOCALE_NAMES}

    seen_descriptions = {}

    for engine_name, lang_descriptions in descriptions.items():
        for language, description in lang_descriptions.items():
            if description[0] in seen_descriptions:
                ref = seen_descriptions[description[0]]
                description = [f'{ref[0]}:{ref[1]}', 'ref']
            else:
                seen_descriptions[description[0]] = (engine_name, language)
                if description[1] == 'wikipedia':
                    description = description[0]
            output.setdefault(language, {}).setdefault(engine_name, description)

    return output


def main():
    initialize()
    fetch_wikidata_descriptions()
    fetch_wikipedia_descriptions()
    fetch_website_descriptions()

    output = get_output()
    with open(get_engine_descriptions_filename(), 'w', encoding='utf8') as f:
        f.write(json.dumps(output, indent=1, separators=(',', ':'), ensure_ascii=False))


if __name__ == "__main__":
    main()