update_languages.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309
  1. #!/usr/bin/env python
  2. # lint: pylint
  3. # SPDX-License-Identifier: AGPL-3.0-or-later
  4. """This script generates languages.py from intersecting each engine's supported
  5. languages.
  6. Output files: :origin:`searx/data/engines_languages.json` and
  7. :origin:`searx/languages.py` (:origin:`CI Update data ...
  8. <.github/workflows/data-update.yml>`).
  9. """
  10. # pylint: disable=invalid-name
  11. from unicodedata import lookup
  12. import json
  13. from pathlib import Path
  14. from pprint import pformat
  15. from babel import Locale, UnknownLocaleError
  16. from babel.languages import get_global
  17. from babel.core import parse_locale
  18. from searx import settings, searx_dir
  19. from searx.engines import load_engines, engines
  20. from searx.network import set_timeout_for_thread
# Output files.
# JSON dump of every engine's fetched language list (data artifact).
engines_languages_file = Path(searx_dir) / 'data' / 'engines_languages.json'
# Generated Python module with the filtered ``language_codes`` tuple.
languages_file = Path(searx_dir) / 'languages.py'
  24. # Fetchs supported languages for each engine and writes json file with those.
  25. def fetch_supported_languages():
  26. set_timeout_for_thread(10.0)
  27. engines_languages = {}
  28. names = list(engines)
  29. names.sort()
  30. for engine_name in names:
  31. if hasattr(engines[engine_name], 'fetch_supported_languages'):
  32. engines_languages[engine_name] = engines[engine_name].fetch_supported_languages()
  33. print("fetched %s languages from engine %s" % (len(engines_languages[engine_name]), engine_name))
  34. if type(engines_languages[engine_name]) == list: # pylint: disable=unidiomatic-typecheck
  35. engines_languages[engine_name] = sorted(engines_languages[engine_name])
  36. print("fetched languages from %s engines" % len(engines_languages))
  37. # write json file
  38. with open(engines_languages_file, 'w', encoding='utf-8') as f:
  39. json.dump(engines_languages, f, indent=2, sort_keys=True)
  40. return engines_languages
  41. # Get babel Locale object from lang_code if possible.
  42. def get_locale(lang_code):
  43. try:
  44. locale = Locale.parse(lang_code, sep='-')
  45. return locale
  46. except (UnknownLocaleError, ValueError):
  47. return None
  48. lang2emoji = {
  49. 'ha': '\U0001F1F3\U0001F1EA', # Hausa / Niger
  50. 'bs': '\U0001F1E7\U0001F1E6', # Bosnian / Bosnia & Herzegovina
  51. 'jp': '\U0001F1EF\U0001F1F5', # Japanese
  52. 'ua': '\U0001F1FA\U0001F1E6', # Ukrainian
  53. 'he': '\U0001F1EE\U0001F1F7', # Hebrew
  54. 'zh': '\U0001F1E8\U0001F1F3', # China (zh)
  55. }
  56. def get_unicode_flag(lang_code):
  57. """Determine a unicode flag (emoji) that fits to the ``lang_code``"""
  58. emoji = lang2emoji.get(lang_code.lower())
  59. if emoji:
  60. return emoji
  61. if len(lang_code) == 2:
  62. l_code = lang_code.lower()
  63. c_code = lang_code.upper()
  64. if c_code == 'EN':
  65. c_code = 'GB'
  66. lang_code = "%s-%s" % (l_code, c_code)
  67. language = territory = script = variant = ''
  68. try:
  69. language, territory, script, variant = parse_locale(lang_code, '-')
  70. except ValueError as exc:
  71. print(exc)
  72. # https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
  73. if not territory:
  74. # https://www.unicode.org/emoji/charts/emoji-list.html#country-flag
  75. emoji = lang2emoji.get(language)
  76. if not emoji:
  77. print(
  78. "%s --> language: %s / territory: %s / script: %s / variant: %s"
  79. % (lang_code, language, territory, script, variant)
  80. )
  81. return emoji
  82. emoji = lang2emoji.get(territory.lower())
  83. if emoji:
  84. return emoji
  85. try:
  86. c1 = lookup('REGIONAL INDICATOR SYMBOL LETTER ' + territory[0])
  87. c2 = lookup('REGIONAL INDICATOR SYMBOL LETTER ' + territory[1])
  88. # print("%s --> territory: %s --> %s%s" %(lang_code, territory, c1, c2 ))
  89. except KeyError as exc:
  90. print("%s --> territory: %s --> %s" % (lang_code, territory, exc))
  91. return None
  92. return c1 + c2
# Join all language lists.
def join_language_lists(engines_languages):
    """Merge every engine's language list into one nested mapping.

    Result shape::

        {short_code: {'name': ..., 'english_name': ..., 'counter': {engine, ...},
                      'countries': {lang_code: {'country_name': ...,
                                                'counter': {engine, ...}}}}}

    ``counter`` sets record which engines support the language (or the
    language-country combination) so they can be thresholded later.
    """
    language_list = {}
    for engine_name in engines_languages:
        for lang_code in engines_languages[engine_name]:
            # apply custom fixes if necessary: map an engine-internal alias
            # back to the canonical code it stands for
            if lang_code in getattr(engines[engine_name], 'language_aliases', {}).values():
                lang_code = next(
                    lc for lc, alias in engines[engine_name].language_aliases.items() if lang_code == alias
                )
            locale = get_locale(lang_code)
            # ensure that lang_code uses standard language and country codes
            if locale and locale.territory:
                lang_code = "{lang}-{country}".format(lang=locale.language, country=locale.territory)
            short_code = lang_code.split('-')[0]
            # add language without country if not in list
            if short_code not in language_list:
                if locale:
                    # get language's data from babel's Locale object
                    language_name = locale.get_language_name().title()
                    english_name = locale.english_name.split(' (')[0]
                elif short_code in engines_languages['wikipedia']:
                    # get language's data from wikipedia if not known by babel
                    language_name = engines_languages['wikipedia'][short_code]['name']
                    english_name = engines_languages['wikipedia'][short_code]['english_name']
                else:
                    # unknown everywhere; kept with None names and reported later
                    language_name = None
                    english_name = None
                # add language to list
                language_list[short_code] = {
                    'name': language_name,
                    'english_name': english_name,
                    'counter': set(),
                    'countries': {},
                }
            # add language with country if not in list
            if lang_code != short_code and lang_code not in language_list[short_code]['countries']:
                country_name = ''
                if locale:
                    # get country name from babel's Locale object
                    try:
                        country_name = locale.get_territory_name()
                    except FileNotFoundError as exc:
                        # babel is missing locale data for this territory
                        print("ERROR: %s --> %s" % (locale, exc))
                        locale = None
                language_list[short_code]['countries'][lang_code] = {
                    'country_name': country_name,
                    'counter': set(),
                }
            # count engine for both language_country combination and language alone
            language_list[short_code]['counter'].add(engine_name)
            if lang_code != short_code:
                language_list[short_code]['countries'][lang_code]['counter'].add(engine_name)
    return language_list
# Filter language list so it only includes the most supported languages and countries
def filter_language_list(all_languages):
    """Reduce the merged language mapping to well-supported entries.

    A language survives when at least ``min_engines_per_lang`` engines
    support it, or when every enabled general-purpose engine does.  For each
    surviving language, country variants need ``min_engines_per_country``
    engines; a language with no qualifying country falls back to babel's
    most-likely territory.  Returns a flat ``{code: data}`` mapping.
    """
    min_engines_per_lang = 13
    min_engines_per_country = 7
    # pylint: disable=consider-using-dict-items, consider-iterating-dictionary
    # enabled engines in the 'general' category that declare supported languages
    main_engines = [
        engine_name
        for engine_name in engines.keys()
        if 'general' in engines[engine_name].categories
        and engines[engine_name].supported_languages
        and not engines[engine_name].disabled
    ]
    # filter list to include only languages supported by most engines or all default general engines
    filtered_languages = {
        code: lang
        for code, lang in all_languages.items()
        if (
            len(lang['counter']) >= min_engines_per_lang
            or all(main_engine in lang['counter'] for main_engine in main_engines)
        )
    }

    def _copy_lang_data(lang, country_name=None):
        # shallow copy of the name fields; country_name only when set
        new_dict = {}
        new_dict['name'] = all_languages[lang]['name']
        new_dict['english_name'] = all_languages[lang]['english_name']
        if country_name:
            new_dict['country_name'] = country_name
        return new_dict

    # for each language get country codes supported by most engines or at least one country code
    filtered_languages_with_countries = {}
    for lang, lang_data in filtered_languages.items():
        countries = lang_data['countries']
        filtered_countries = {}
        # get language's country codes with enough supported engines
        for lang_country, country_data in countries.items():
            if len(country_data['counter']) >= min_engines_per_country:
                filtered_countries[lang_country] = _copy_lang_data(lang, country_data['country_name'])
        # add language without countries too if there's more than one country to choose from
        if len(filtered_countries) > 1:
            filtered_countries[lang] = _copy_lang_data(lang, None)
        elif len(filtered_countries) == 1:
            # if there's only one country per language, it's not necessary to show country name
            lang_country = next(iter(filtered_countries))
            filtered_countries[lang_country]['country_name'] = None
        # if no country has enough engines try to get most likely country code from babel
        if not filtered_countries:
            lang_country = None
            subtags = get_global('likely_subtags').get(lang)
            if subtags:
                # likely_subtags values look like 'lang_Script_TERRITORY'
                country_code = subtags.split('_')[-1]
                if len(country_code) == 2:
                    lang_country = "{lang}-{country}".format(lang=lang, country=country_code)
            if lang_country:
                filtered_countries[lang_country] = _copy_lang_data(lang, None)
            else:
                filtered_countries[lang] = _copy_lang_data(lang, None)
        filtered_languages_with_countries.update(filtered_countries)
    return filtered_languages_with_countries
  205. class UnicodeEscape(str):
  206. """Escape unicode string in :py:obj:`pprint.pformat`"""
  207. def __repr__(self):
  208. return "'" + "".join([chr(c) for c in self.encode('unicode-escape')]) + "'"
  209. # Write languages.py.
  210. def write_languages_file(languages):
  211. file_headers = (
  212. "# -*- coding: utf-8 -*-",
  213. "# list of language codes",
  214. "# this file is generated automatically by utils/fetch_languages.py",
  215. "language_codes = (\n",
  216. )
  217. language_codes = []
  218. for code in sorted(languages):
  219. name = languages[code]['name']
  220. if name is None:
  221. print("ERROR: languages['%s'] --> %s" % (code, languages[code]))
  222. continue
  223. flag = get_unicode_flag(code) or ''
  224. item = (
  225. code,
  226. languages[code]['name'].split(' (')[0],
  227. languages[code].get('country_name') or '',
  228. languages[code].get('english_name') or '',
  229. UnicodeEscape(flag),
  230. )
  231. language_codes.append(item)
  232. language_codes = tuple(language_codes)
  233. with open(languages_file, 'w', encoding='utf-8') as new_file:
  234. file_content = "{file_headers} {language_codes},\n)\n".format(
  235. # fmt: off
  236. file_headers = '\n'.join(file_headers),
  237. language_codes = pformat(language_codes, indent=4)[1:-1]
  238. # fmt: on
  239. )
  240. new_file.write(file_content)
  241. new_file.close()
if __name__ == "__main__":
    # pipeline: load engines -> fetch per-engine languages -> merge ->
    # filter by engine support -> emit searx/languages.py
    load_engines(settings['engines'])
    _engines_languages = fetch_supported_languages()
    _all_languages = join_language_lists(_engines_languages)
    _filtered_languages = filter_language_list(_all_languages)
    write_languages_file(_filtered_languages)