#!/usr/bin/env python
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Fetch website descriptions from the websites themselves and from the
:origin:`searx/engines/wikidata.py` engine.

Output file: :origin:`searx/data/engine_descriptions.json`.
"""

import json
from urllib.parse import urlparse
from os.path import join

from lxml.html import fromstring
from langdetect import detect_langs
from langdetect.lang_detect_exception import LangDetectException

from searx.engines import wikidata, set_loggers
from searx.utils import extract_text, match_language, gen_useragent
from searx.locales import LOCALE_NAMES
from searx import searx_dir
import searx.search
import searx.network

set_loggers(wikidata, 'wikidata')

SPARQL_WIKIPEDIA_ARTICLE = """
SELECT DISTINCT ?item ?name
WHERE {
  hint:Query hint:optimizer "None".
  VALUES ?item { %IDS% }
  ?article schema:about ?item ;
           schema:inLanguage ?lang ;
           schema:name ?name ;
           schema:isPartOf [ wikibase:wikiGroup "wikipedia" ] .
  FILTER(?lang in (%LANGUAGES_SPARQL%)) .
  FILTER (!CONTAINS(?name, ':')) .
}
"""

SPARQL_DESCRIPTION = """
SELECT DISTINCT ?item ?itemDescription
WHERE {
  VALUES ?item { %IDS% }
  ?item schema:description ?itemDescription .
  FILTER (lang(?itemDescription) in (%LANGUAGES_SPARQL%))
}
ORDER BY ?itemLang
"""

NOT_A_DESCRIPTION = [
    'web site',
    'site web',
    'komputa serĉilo',
    'interreta serĉilo',
    'bilaketa motor',
    'web search engine',
    'wikimedia täpsustuslehekülg',
]

SKIP_ENGINE_SOURCE = [
    # fmt: off
    ('gitlab', 'wikidata')
    # descriptions are about wikipedia disambiguation pages
    # fmt: on
]

LANGUAGES = LOCALE_NAMES.keys()
WIKIPEDIA_LANGUAGES = {'language': 'wikipedia_language'}
LANGUAGES_SPARQL = ''
IDS = None

descriptions = {}
wd_to_engine_name = {}

def normalize_description(description):
    # replace ASCII control characters with spaces and collapse whitespace
    for c in [chr(c) for c in range(0, 31)]:
        description = description.replace(c, ' ')
    description = ' '.join(description.strip().split())
    return description

def update_description(engine_name, lang, description, source, replace=True):
    if not isinstance(description, str):
        return
    description = normalize_description(description)
    if description.lower() == engine_name.lower():
        return
    if description.lower() in NOT_A_DESCRIPTION:
        return
    if (engine_name, source) in SKIP_ENGINE_SOURCE:
        return
    if ' ' not in description:
        # skip single-word descriptions (like "website")
        return
    if replace or lang not in descriptions[engine_name]:
        descriptions[engine_name][lang] = [description, source]
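
# Illustrative calls to update_description (hypothetical engine name and texts):
#   update_description('example', 'en', 'Example is a privacy friendly search engine', 'wikidata')
#     -> stores descriptions['example']['en'] = ['Example is a ...', 'wikidata']
#   update_description('example', 'en', 'website', 'https://example.org')
#     -> ignored, single-word descriptions carry no information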

def get_wikipedia_summary(lang, pageid):
    params = {'language': lang.replace('_', '-'), 'headers': {}}
    searx.engines.engines['wikipedia'].request(pageid, params)
    try:
        response = searx.network.get(params['url'], headers=params['headers'], timeout=10)
        response.raise_for_status()
        api_result = json.loads(response.text)
        return api_result.get('extract')
    except Exception:  # pylint: disable=broad-except
        return None

def detect_language(text):
    try:
        r = detect_langs(str(text))  # pylint: disable=E1101
    except LangDetectException:
        return None
    if len(r) > 0 and r[0].prob > 0.95:
        return r[0].lang
    return None
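
# Illustrative behaviour (hypothetical input): for a clearly French sentence,
# detect_langs() usually reports something like [fr:0.99...], so 'fr' is returned;
# any detection below the 0.95 confidence threshold yields None instead.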

def get_website_description(url, lang1, lang2=None):
    headers = {
        'User-Agent': gen_useragent(),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'DNT': '1',
        'Upgrade-Insecure-Requests': '1',
        'Sec-GPC': '1',
        'Cache-Control': 'max-age=0',
    }
    if lang1 is not None:
        lang_list = [lang1]
        if lang2 is not None:
            lang_list.append(lang2)
        headers['Accept-Language'] = f'{",".join(lang_list)};q=0.8'

    try:
        response = searx.network.get(url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception:
        return (None, None)

    try:
        html = fromstring(response.text)
    except ValueError:
        html = fromstring(response.content)

    description = extract_text(html.xpath('/html/head/meta[@name="description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/meta[@property="og:description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/title'))

    lang = extract_text(html.xpath('/html/@lang'))
    if lang is None and len(lang1) > 0:
        lang = lang1
    lang = detect_language(description) or lang or 'en'
    lang = lang.split('_')[0]
    lang = lang.split('-')[0]
    return (lang, description)

def initialize():
    global IDS, WIKIPEDIA_LANGUAGES, LANGUAGES_SPARQL
    searx.search.initialize()
    wikipedia_engine = searx.engines.engines['wikipedia']

    WIKIPEDIA_LANGUAGES = {language: wikipedia_engine.url_lang(language.replace('_', '-')) for language in LANGUAGES}
    WIKIPEDIA_LANGUAGES['nb_NO'] = 'no'
    LANGUAGES_SPARQL = ', '.join(f"'{l}'" for l in set(WIKIPEDIA_LANGUAGES.values()))

    for engine_name, engine in searx.engines.engines.items():
        descriptions[engine_name] = {}
        wikidata_id = getattr(engine, "about", {}).get('wikidata_id')
        if wikidata_id is not None:
            wd_to_engine_name.setdefault(wikidata_id, set()).add(engine_name)

    IDS = ' '.join(list(map(lambda wd_id: 'wd:' + wd_id, wd_to_engine_name.keys())))

def fetch_wikidata_descriptions():
    searx.network.set_timeout_for_thread(60)
    result = wikidata.send_wikidata_query(
        SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            wikidata_lang = binding['itemDescription']['xml:lang']
            description = binding['itemDescription']['value']
            for engine_name in wd_to_engine_name[wikidata_id]:
                for lang in LANGUAGES:
                    if WIKIPEDIA_LANGUAGES[lang] == wikidata_lang:
                        update_description(engine_name, lang, description, 'wikidata')

def fetch_wikipedia_descriptions():
    result = wikidata.send_wikidata_query(
        SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            wikidata_lang = binding['name']['xml:lang']
            pageid = binding['name']['value']
            for engine_name in wd_to_engine_name[wikidata_id]:
                for lang in LANGUAGES:
                    if WIKIPEDIA_LANGUAGES[lang] == wikidata_lang:
                        description = get_wikipedia_summary(lang, pageid)
                        update_description(engine_name, lang, description, 'wikipedia')

def normalize_url(url):
    url = url.replace('{language}', 'en')
    url = urlparse(url)._replace(path='/', params='', query='', fragment='').geturl()
    url = url.replace('https://api.', 'https://')
    return url
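
# Illustrative normalization (hypothetical search_url):
#   normalize_url('https://api.example.org/{language}/search?q={query}')
#   -> 'https://example.org/'
# ({language} is pinned to 'en', path/query/fragment are dropped, and a leading
# 'api.' subdomain is removed so that the front page of the website is fetched).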

def fetch_website_description(engine_name, website):
    default_lang, default_description = get_website_description(website, None, None)
    if default_lang is None or default_description is None:
        # the front page can't be fetched: skip this engine
        return

    wikipedia_languages_r = {V: K for K, V in WIKIPEDIA_LANGUAGES.items()}
    languages = ['en', 'es', 'pt', 'ru', 'tr', 'fr']
    languages = languages + [l for l in LANGUAGES if l not in languages]

    previous_matched_lang = None
    previous_count = 0
    for lang in languages:
        if lang not in descriptions[engine_name]:
            fetched_lang, desc = get_website_description(website, lang, WIKIPEDIA_LANGUAGES[lang])
            if fetched_lang is None or desc is None:
                continue
            matched_lang = match_language(fetched_lang, LANGUAGES, fallback=None)
            if matched_lang is None:
                fetched_wikipedia_lang = match_language(fetched_lang, WIKIPEDIA_LANGUAGES.values(), fallback=None)
                matched_lang = wikipedia_languages_r.get(fetched_wikipedia_lang)
            if matched_lang is not None:
                update_description(engine_name, matched_lang, desc, website, replace=False)
            # check whether the description changed with the different Accept-Language values
            if matched_lang == previous_matched_lang:
                previous_count += 1
                if previous_count == 6:
                    # the website returned the same description for 6 different
                    # Accept-Language values: stop now
                    break
            else:
                previous_matched_lang = matched_lang
                previous_count = 0

def fetch_website_descriptions():
    for engine_name, engine in searx.engines.engines.items():
        website = getattr(engine, "about", {}).get('website')
        if website is None and hasattr(engine, "search_url"):
            website = normalize_url(getattr(engine, "search_url"))
        if website is None and hasattr(engine, "base_url"):
            website = normalize_url(getattr(engine, "base_url"))
        if website is not None:
            fetch_website_description(engine_name, website)


def get_engine_descriptions_filename():
    return join(join(searx_dir, "data"), "engine_descriptions.json")

def get_output():
    """
    From descriptions[engine][language] = [description, source]
    To

    * output[language][engine] = description_and_source
    * description_and_source can be:
      * [description, source]
      * description (if source = "wikipedia")
      * [f"engine:lang", "ref"] (reference to another existing description)
    """
    output = {locale: {} for locale in LOCALE_NAMES}
    seen_descriptions = {}

    for engine_name, lang_descriptions in descriptions.items():
        for language, description in lang_descriptions.items():
            if description[0] in seen_descriptions:
                ref = seen_descriptions[description[0]]
                description = [f'{ref[0]}:{ref[1]}', 'ref']
            else:
                seen_descriptions[description[0]] = (engine_name, language)
                if description[1] == 'wikipedia':
                    description = description[0]
            output.setdefault(language, {}).setdefault(engine_name, description)

    return output

def main():
    initialize()
    print('Fetching wikidata descriptions')
    fetch_wikidata_descriptions()
    print('Fetching wikipedia descriptions')
    fetch_wikipedia_descriptions()
    print('Fetching website descriptions')
    fetch_website_descriptions()

    output = get_output()
    with open(get_engine_descriptions_filename(), 'w', encoding='utf8') as f:
        f.write(json.dumps(output, indent=1, separators=(',', ':'), ensure_ascii=False))


if __name__ == "__main__":
    main()