update_engine_descriptions.py

#!/usr/bin/env python
"""Fetch engine descriptions from Wikidata, Wikipedia and the engines' own
websites, and dump them as JSON on stdout."""

import sys
import json
from urllib.parse import quote, urlparse

import cld3  # pycld3, used by detect_language() below
from lxml.html import fromstring

from searx.engines.wikidata import send_wikidata_query
from searx.utils import extract_text
from searx.locales import LOCALE_NAMES
import searx
import searx.search
import searx.network

SPARQL_WIKIPEDIA_ARTICLE = """
SELECT DISTINCT ?item ?name
WHERE {
  VALUES ?item { %IDS% }
  ?article schema:about ?item ;
           schema:inLanguage ?lang ;
           schema:name ?name ;
           schema:isPartOf [ wikibase:wikiGroup "wikipedia" ] .
  FILTER(?lang in (%LANGUAGES_SPARQL%)) .
  FILTER (!CONTAINS(?name, ':')) .
}
"""

SPARQL_DESCRIPTION = """
SELECT DISTINCT ?item ?itemDescription
WHERE {
  VALUES ?item { %IDS% }
  ?item schema:description ?itemDescription .
  FILTER (lang(?itemDescription) in (%LANGUAGES_SPARQL%))
}
ORDER BY ?itemLang
"""

LANGUAGES = LOCALE_NAMES.keys()
LANGUAGES_SPARQL = ', '.join(set(map(lambda l: repr(l.split('_')[0]), LANGUAGES)))
IDS = None

descriptions = {}
wd_to_engine_name = {}


def normalize_description(description):
    # replace ASCII control characters with spaces, then collapse runs of whitespace
    for c in [chr(c) for c in range(0, 31)]:
        description = description.replace(c, ' ')
    description = ' '.join(description.strip().split())
    return description
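
# Illustrative behaviour (not from the source): control characters become
# spaces and whitespace collapses, e.g.
#   normalize_description('A meta\nsearch   engine ') == 'A meta search engine'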


def update_description(engine_name, lang, description, source, replace=True):
    # keep the first description seen per language unless replace=True
    if replace or lang not in descriptions[engine_name]:
        descriptions[engine_name][lang] = [normalize_description(description), source]


def get_wikipedia_summary(language, pageid):
    search_url = 'https://{language}.wikipedia.org/api/rest_v1/page/summary/{title}'
    url = search_url.format(title=quote(pageid), language=language)
    try:
        response = searx.network.get(url)
        response.raise_for_status()
        api_result = json.loads(response.text)
        return api_result.get('extract')
    except Exception:  # network error, HTTP error or malformed JSON
        return None
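
# The Wikipedia REST v1 summary endpoint returns JSON with an 'extract' field,
# e.g. https://en.wikipedia.org/api/rest_v1/page/summary/DuckDuckGo
# (illustrative title, not taken from a real run).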


def detect_language(text):
    # pycld3 language identification; only trust confident, reliable results
    r = cld3.get_language(str(text))  # pylint: disable=E1101
    if r is not None and r.probability >= 0.98 and r.is_reliable:
        return r.language
    return None


def get_website_description(url, lang1, lang2=None):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'DNT': '1',
        'Upgrade-Insecure-Requests': '1',
        'Sec-GPC': '1',
        'Cache-Control': 'max-age=0',
    }
    if lang1 is not None:
        lang_list = [lang1]
        if lang2 is not None:
            lang_list.append(lang2)
        headers['Accept-Language'] = f'{",".join(lang_list)};q=0.8'
    try:
        response = searx.network.get(url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception:
        return (None, None)
    try:
        html = fromstring(response.text)
    except ValueError:
        # lxml refuses str input when the document declares an encoding
        html = fromstring(response.content)
    # prefer <meta name="description">, then og:description, then <title>
    description = extract_text(html.xpath('/html/head/meta[@name="description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/meta[@property="og:description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/title'))
    lang = extract_text(html.xpath('/html/@lang'))
    if lang is None and lang1 is not None:
        lang = lang1
    lang = detect_language(description) or lang or 'en'
    lang = lang.split('_')[0]
    lang = lang.split('-')[0]
    return (lang, description)
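
# Illustrative outcomes (not from a real fetch):
#   get_website_description('https://example.org/', None) -> ('en', 'Example Domain')
#   any network or HTTP error                             -> (None, None)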


def initialize():
    global descriptions, wd_to_engine_name, IDS
    searx.search.initialize()
    for engine_name, engine in searx.engines.engines.items():
        descriptions[engine_name] = {}
        wikidata_id = getattr(engine, "about", {}).get('wikidata_id')
        if wikidata_id is not None:
            wd_to_engine_name.setdefault(wikidata_id, set()).add(engine_name)

    # build the SPARQL VALUES clause: "wd:Q123 wd:Q456 ..."
    IDS = ' '.join(list(map(lambda wd_id: 'wd:' + wd_id, wd_to_engine_name.keys())))


def fetch_wikidata_descriptions():
    global IDS
    result = send_wikidata_query(
        SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            lang = binding['itemDescription']['xml:lang']
            description = binding['itemDescription']['value']
            if ' ' in description:  # skip single-word descriptions (like "website")
                for engine_name in wd_to_engine_name[wikidata_id]:
                    update_description(engine_name, lang, description, 'wikidata')
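
# send_wikidata_query() returns standard SPARQL JSON results; a binding looks
# roughly like this (trimmed, illustrative values):
#   {"item": {"value": "http://www.wikidata.org/entity/Q9366"},
#    "itemDescription": {"xml:lang": "en", "value": "web search engine ..."}}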


def fetch_wikipedia_descriptions():
    global IDS
    result = send_wikidata_query(
        SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            lang = binding['name']['xml:lang']
            pageid = binding['name']['value']
            description = get_wikipedia_summary(lang, pageid)
            if description is not None and ' ' in description:
                for engine_name in wd_to_engine_name[wikidata_id]:
                    update_description(engine_name, lang, description, 'wikipedia')


def normalize_url(url):
    url = url.replace('{language}', 'en')
    url = urlparse(url)._replace(path='/', params='', query='', fragment='').geturl()
    url = url.replace('https://api.', 'https://')
    return url
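
# e.g. (hypothetical URL) normalize_url('https://api.example.org/v1/search?q={query}')
#      -> 'https://example.org/'  (path, query and fragment dropped, 'api.' prefix removed)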


def fetch_website_description(engine_name, website):
    default_lang, default_description = get_website_description(website, None, None)
    if default_lang is None or default_description is None:
        return
    if default_lang not in descriptions[engine_name]:
        descriptions[engine_name][default_lang] = [normalize_description(default_description), website]
    for request_lang in ('en-US', 'es-US', 'fr-FR', 'zh', 'ja', 'ru', 'ar', 'ko'):
        if request_lang.split('-')[0] not in descriptions[engine_name]:
            lang, desc = get_website_description(website, request_lang, request_lang.split('-')[0])
            if desc is not None and desc != default_description:
                update_description(engine_name, lang, desc, website, replace=False)
            else:
                # the site doesn't vary its description with Accept-Language: stop asking
                break


def fetch_website_descriptions():
    for engine_name, engine in searx.engines.engines.items():
        website = getattr(engine, "about", {}).get('website')
        if website is None and hasattr(engine, "search_url"):
            website = normalize_url(getattr(engine, "search_url"))
        if website is None and hasattr(engine, "base_url"):
            website = normalize_url(getattr(engine, "base_url"))
        if website is not None:
            fetch_website_description(engine_name, website)


def main():
    initialize()
    fetch_wikidata_descriptions()
    fetch_wikipedia_descriptions()
    fetch_website_descriptions()
    sys.stdout.write(json.dumps(descriptions, indent=1, separators=(',', ':'), ensure_ascii=False))


if __name__ == "__main__":
    main()
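
# Usage sketch (invocation path is an assumption, adjust to your checkout):
#   python update_engine_descriptions.py > engine_descriptions.json
# The emitted JSON maps engine -> language -> [description, source], e.g.:
#   {"duckduckgo": {"en": ["DuckDuckGo is a ...", "wikidata"]}}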