#!/usr/bin/env python
# lint: pylint
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Fetch website descriptions from websites and from the
:origin:`searx/engines/wikidata.py` engine.

Output file: :origin:`searx/data/engine_descriptions.json`.

"""

# pylint: disable=invalid-name, global-statement

import json
from urllib.parse import urlparse
from os.path import join

from lxml.html import fromstring

from searx.engines import wikidata, set_loggers
from searx.utils import extract_text, match_language, gen_useragent, detect_language
from searx.locales import LOCALE_NAMES, locales_initialize
from searx import searx_dir
import searx.search
import searx.network

set_loggers(wikidata, 'wikidata')
locales_initialize()

SPARQL_WIKIPEDIA_ARTICLE = """
SELECT DISTINCT ?item ?name
WHERE {
  hint:Query hint:optimizer "None".
  VALUES ?item { %IDS% }
  ?article schema:about ?item ;
           schema:inLanguage ?lang ;
           schema:name ?name ;
           schema:isPartOf [ wikibase:wikiGroup "wikipedia" ] .
  FILTER(?lang in (%LANGUAGES_SPARQL%)) .
  FILTER (!CONTAINS(?name, ':')) .
}
"""

SPARQL_DESCRIPTION = """
SELECT DISTINCT ?item ?itemDescription
WHERE {
  VALUES ?item { %IDS% }
  ?item schema:description ?itemDescription .
  FILTER (lang(?itemDescription) in (%LANGUAGES_SPARQL%))
}
ORDER BY ?itemLang
"""

NOT_A_DESCRIPTION = [
    'web site',
    'site web',
    'komputa serĉilo',
    'interreta serĉilo',
    'bilaketa motor',
    'web search engine',
    'wikimedia täpsustuslehekülg',
]

SKIP_ENGINE_SOURCE = [
    # fmt: off
    ('gitlab', 'wikidata')
    # descriptions are about wikipedia disambiguation pages
    # fmt: on
]

LANGUAGES = LOCALE_NAMES.keys()
WIKIPEDIA_LANGUAGES = {'language': 'wikipedia_language'}  # placeholder, filled in by initialize()
LANGUAGES_SPARQL = ''
IDS = None

descriptions = {}
wd_to_engine_name = {}


def normalize_description(description):
    for c in [chr(c) for c in range(0, 31)]:
        description = description.replace(c, ' ')
    description = ' '.join(description.strip().split())
    return description
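
# Illustration: control characters become spaces and whitespace runs collapse,
# so normalize_description('a\tb\x00c  d') returns 'a b c d'.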


def update_description(engine_name, lang, description, source, replace=True):
    if not isinstance(description, str):
        return
    description = normalize_description(description)
    if description.lower() == engine_name.lower():
        return
    if description.lower() in NOT_A_DESCRIPTION:
        return
    if (engine_name, source) in SKIP_ENGINE_SOURCE:
        return
    if ' ' not in description:
        # skip one-word descriptions (like "website")
        return
    if replace or lang not in descriptions[engine_name]:
        descriptions[engine_name][lang] = [description, source]
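
# After a successful update the in-memory structure looks like, with
# hypothetical values:
#
#   descriptions['example engine']['fr'] = ['Moteur de recherche', 'wikidata']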


def get_wikipedia_summary(lang, pageid):
    params = {'language': lang.replace('_', '-'), 'headers': {}}
    searx.engines.engines['wikipedia'].request(pageid, params)
    try:
        response = searx.network.get(params['url'], headers=params['headers'], timeout=10)
        response.raise_for_status()
        api_result = json.loads(response.text)
        return api_result.get('extract')
    except Exception:  # pylint: disable=broad-except
        return None
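
# get_wikipedia_summary() reuses the wikipedia engine's own request() to fill
# in params['url'], so the exact API endpoint is whatever that engine targets.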


def get_website_description(url, lang1, lang2=None):
    headers = {
        'User-Agent': gen_useragent(),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'DNT': '1',
        'Upgrade-Insecure-Requests': '1',
        'Sec-GPC': '1',
        'Cache-Control': 'max-age=0',
    }
    if lang1 is not None:
        lang_list = [lang1]
        if lang2 is not None:
            lang_list.append(lang2)
        headers['Accept-Language'] = f'{",".join(lang_list)};q=0.8'
    try:
        response = searx.network.get(url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception:  # pylint: disable=broad-except
        return (None, None)

    try:
        html = fromstring(response.text)
    except ValueError:
        html = fromstring(response.content)

    description = extract_text(html.xpath('/html/head/meta[@name="description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/meta[@property="og:description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/title'))

    lang = extract_text(html.xpath('/html/@lang'))
    if lang is None and lang1:
        # guard against lang1 being None (the default-language probe)
        lang = lang1
    lang = detect_language(description) or lang or 'en'
    lang = lang.split('_')[0]
    lang = lang.split('-')[0]
    return (lang, description)
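
# Returns (lang, description) where lang is reduced to a bare language code
# ('en', not 'en-US'); detect_language() on the description text takes
# precedence over the page's declared lang attribute.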


def initialize():
    global IDS, WIKIPEDIA_LANGUAGES, LANGUAGES_SPARQL
    searx.search.initialize()
    wikipedia_engine = searx.engines.engines['wikipedia']
    WIKIPEDIA_LANGUAGES = {language: wikipedia_engine.url_lang(language.replace('_', '-')) for language in LANGUAGES}
    WIKIPEDIA_LANGUAGES['nb_NO'] = 'no'
    LANGUAGES_SPARQL = ', '.join(f"'{l}'" for l in set(WIKIPEDIA_LANGUAGES.values()))
    for engine_name, engine in searx.engines.engines.items():
        descriptions[engine_name] = {}
        wikidata_id = getattr(engine, "about", {}).get('wikidata_id')
        if wikidata_id is not None:
            wd_to_engine_name.setdefault(wikidata_id, set()).add(engine_name)

    IDS = ' '.join('wd:' + wd_id for wd_id in wd_to_engine_name)
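
# After initialize(), IDS holds space-separated wikidata references ready for
# the %IDS% placeholder, e.g. (hypothetical IDs): 'wd:Q12345 wd:Q67890'.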


def fetch_wikidata_descriptions():
    searx.network.set_timeout_for_thread(60)
    result = wikidata.send_wikidata_query(
        SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            wikidata_lang = binding['itemDescription']['xml:lang']
            description = binding['itemDescription']['value']
            for engine_name in wd_to_engine_name[wikidata_id]:
                for lang in LANGUAGES:
                    if WIKIPEDIA_LANGUAGES[lang] == wikidata_lang:
                        update_description(engine_name, lang, description, 'wikidata')


def fetch_wikipedia_descriptions():
    result = wikidata.send_wikidata_query(
        SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            wikidata_lang = binding['name']['xml:lang']
            pageid = binding['name']['value']
            for engine_name in wd_to_engine_name[wikidata_id]:
                for lang in LANGUAGES:
                    if WIKIPEDIA_LANGUAGES[lang] == wikidata_lang:
                        description = get_wikipedia_summary(lang, pageid)
                        update_description(engine_name, lang, description, 'wikipedia')


def normalize_url(url):
    url = url.replace('{language}', 'en')
    url = urlparse(url)._replace(path='/', params='', query='', fragment='').geturl()
    url = url.replace('https://api.', 'https://')
    return url
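
# e.g. normalize_url('https://api.example.org/search?q={query}') returns
# 'https://example.org/' (hypothetical URL: the path and query are dropped
# and a leading "api." subdomain is stripped).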


def fetch_website_description(engine_name, website):
    default_lang, default_description = get_website_description(website, None, None)
    if default_lang is None or default_description is None:
        # the front page can't be fetched: skip this engine
        return

    wikipedia_languages_r = {V: K for K, V in WIKIPEDIA_LANGUAGES.items()}
    # probe the most common languages first
    languages = ['en', 'es', 'pt', 'ru', 'tr', 'fr']
    languages = languages + [l for l in LANGUAGES if l not in languages]

    previous_matched_lang = None
    previous_count = 0
    for lang in languages:
        if lang not in descriptions[engine_name]:
            fetched_lang, desc = get_website_description(website, lang, WIKIPEDIA_LANGUAGES[lang])
            if fetched_lang is None or desc is None:
                continue
            matched_lang = match_language(fetched_lang, LANGUAGES, fallback=None)
            if matched_lang is None:
                fetched_wikipedia_lang = match_language(fetched_lang, WIKIPEDIA_LANGUAGES.values(), fallback=None)
                matched_lang = wikipedia_languages_r.get(fetched_wikipedia_lang)
            if matched_lang is not None:
                update_description(engine_name, matched_lang, desc, website, replace=False)
            # check whether the description changes with different Accept-Language values
            if matched_lang == previous_matched_lang:
                previous_count += 1
                if previous_count == 6:
                    # the website returned the same description for six different
                    # Accept-Language headers: stop now
                    break
            else:
                previous_matched_lang = matched_lang
                previous_count = 0


def fetch_website_descriptions():
    for engine_name, engine in searx.engines.engines.items():
        website = getattr(engine, "about", {}).get('website')
        if website is None and hasattr(engine, "search_url"):
            website = normalize_url(getattr(engine, "search_url"))
        if website is None and hasattr(engine, "base_url"):
            website = normalize_url(getattr(engine, "base_url"))
        if website is not None:
            fetch_website_description(engine_name, website)


def get_engine_descriptions_filename():
    return join(join(searx_dir, "data"), "engine_descriptions.json")


def get_output():
    """
    From descriptions[engine][language] = [description, source]
    To

    * output[language][engine] = description_and_source
    * description_and_source can be:
       * [description, source]
       * description (if source = "wikipedia")
       * [f"engine:lang", "ref"] (reference to another existing description)
    """
    output = {locale: {} for locale in LOCALE_NAMES}

    seen_descriptions = {}

    for engine_name, lang_descriptions in descriptions.items():
        for language, description in lang_descriptions.items():
            if description[0] in seen_descriptions:
                ref = seen_descriptions[description[0]]
                description = [f'{ref[0]}:{ref[1]}', 'ref']
            else:
                seen_descriptions[description[0]] = (engine_name, language)
                if description[1] == 'wikipedia':
                    description = description[0]
            output.setdefault(language, {}).setdefault(engine_name, description)

    return output
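
# The resulting JSON is keyed by locale first, e.g. with hypothetical values:
#
#   {"en": {"example engine": ["An example description", "https://example.org/"]}}
#
# Wikipedia-sourced entries are stored as bare strings, and duplicate
# descriptions are replaced by ["engine:lang", "ref"] back-references.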


def main():
    initialize()
    print('Fetching wikidata descriptions')
    fetch_wikidata_descriptions()
    print('Fetching wikipedia descriptions')
    fetch_wikipedia_descriptions()
    print('Fetching website descriptions')
    fetch_website_descriptions()

    output = get_output()
    with open(get_engine_descriptions_filename(), 'w', encoding='utf8') as f:
        f.write(json.dumps(output, indent=1, separators=(',', ':'), ensure_ascii=False))


if __name__ == "__main__":
    main()
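
# To run (a sketch, assuming a SearXNG development checkout with its
# dependencies installed; the exact path may differ):
#
#   python searxng_extra/update/update_engine_descriptions.py
#
# The script overwrites searx/data/engine_descriptions.json in place.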