wikipedia.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Wikipedia (Web)
"""

from urllib.parse import quote
from json import loads
from lxml.html import fromstring
from searx.utils import match_language, searx_useragent
from searx.network import raise_for_httperror

# about
about = {
    "website": 'https://www.wikipedia.org/',
    "wikidata_id": 'Q52',
    "official_api_documentation": 'https://en.wikipedia.org/api/',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

send_accept_language_header = True

# search-url
search_url = 'https://{language}.wikipedia.org/api/rest_v1/page/summary/{title}'
supported_languages_url = 'https://meta.wikimedia.org/wiki/List_of_Wikipedias'
language_variants = {"zh": ("zh-cn", "zh-hk", "zh-mo", "zh-my", "zh-sg", "zh-tw")}
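
# Note: ``supported_languages`` and ``language_aliases`` referenced below are
# not defined in this file; searx's engine loader is expected to attach them
# to the module at runtime (``supported_languages`` being built from
# ``_fetch_supported_languages`` at the bottom of this file).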

# set language in base_url
def url_lang(lang):
    lang_pre = lang.split('-')[0]
    if lang_pre == 'all' or lang_pre not in supported_languages and lang_pre not in language_aliases:
        return 'en'
    return match_language(lang, supported_languages, language_aliases).split('-')[0]

# do search-request
def request(query, params):
    if query.islower():
        query = query.title()

    language = url_lang(params['language'])
    params['url'] = search_url.format(title=quote(query), language=language)

    params['headers']['User-Agent'] = searx_useragent()
    params['raise_for_httperror'] = False
    params['soft_max_redirects'] = 2

    return params

# get response from search-request
def response(resp):
    if resp.status_code == 404:
        return []

    if resp.status_code == 400:
        try:
            api_result = loads(resp.text)
        except:
            pass
        else:
            if (
                api_result['type'] == 'https://mediawiki.org/wiki/HyperSwitch/errors/bad_request'
                and api_result['detail'] == 'title-invalid-characters'
            ):
                return []

    raise_for_httperror(resp)

    results = []
    api_result = loads(resp.text)

    # skip disambiguation pages
    if api_result.get('type') != 'standard':
        return []

    title = api_result['title']
    wikipedia_link = api_result['content_urls']['desktop']['page']
    results.append({'url': wikipedia_link, 'title': title})

    results.append(
        {
            'infobox': title,
            'id': wikipedia_link,
            'content': api_result.get('extract', ''),
            'img_src': api_result.get('thumbnail', {}).get('source'),
            'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}],
        }
    )

    return results

# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = {}
    dom = fromstring(resp.text)
    tables = dom.xpath('//table[contains(@class,"sortable")]')
    for table in tables:
        # exclude header row
        trs = table.xpath('.//tr')[1:]
        for tr in trs:
            td = tr.xpath('./td')
            code = td[3].xpath('./a')[0].text
            name = td[1].xpath('./a')[0].text
            english_name = td[1].xpath('./a')[0].text
            articles = int(td[4].xpath('./a')[0].text.replace(',', ''))
            # exclude languages with too few articles
            if articles >= 100:
                supported_languages[code] = {"name": name, "english_name": english_name}

    return supported_languages
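
# Minimal manual check (a sketch, not part of the upstream engine): build the
# request params roughly the way searx would and print the resulting summary
# URL. Only the 'language' and 'headers' keys searx normally provides are
# stubbed here; 'all' is used so url_lang() falls back to 'en' without needing
# the runtime-injected supported_languages mapping.
if __name__ == '__main__':
    demo_params = {'language': 'all', 'headers': {}}
    demo_params = request('Python (programming language)', demo_params)
    print(demo_params['url'])
    # expected: https://en.wikipedia.org/api/rest_v1/page/summary/Python%20%28programming%20language%29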