wikipedia.py

  1. """
  2. Wikipedia (Web)
  3. @website https://en.wikipedia.org/api/rest_v1/
  4. @provide-api yes
  5. @using-api yes
  6. @results JSON
  7. @stable yes
  8. @parse url, infobox
  9. """
from urllib.parse import quote
from json import loads
from lxml.html import fromstring
from searx.utils import match_language, searx_useragent

# search-url
search_url = 'https://{language}.wikipedia.org/api/rest_v1/page/summary/{title}'
supported_languages_url = 'https://meta.wikimedia.org/wiki/List_of_Wikipedias'
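
# NOTE: `supported_languages` and `language_aliases` are referenced below but
# are not defined in this file; searx's engine loader injects them as module
# attributes at startup (the former populated via _fetch_supported_languages).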


# set language in base_url
def url_lang(lang):
    lang_pre = lang.split('-')[0]
    if lang_pre == 'all' or (lang_pre not in supported_languages and lang_pre not in language_aliases):
        return 'en'
    return match_language(lang, supported_languages, language_aliases).split('-')[0]
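
# Illustrative behavior (assuming 'fr' is among the injected supported
# languages): url_lang('fr-FR') -> 'fr'; url_lang('all') and unknown codes
# such as 'xx-XX' fall back to 'en'.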


# do search-request
def request(query, params):
    if query.islower():
        query = query.title()

    params['url'] = search_url.format(title=quote(query),
                                      language=url_lang(params['language']))

    params['headers']['User-Agent'] = searx_useragent()
    params['raise_for_status'] = False
    params['soft_max_redirects'] = 2

    return params
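
# Example: a query of 'albert einstein' with params['language'] == 'en-US' is
# title-cased and URL-quoted, producing
#   https://en.wikipedia.org/api/rest_v1/page/summary/Albert%20Einstein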


# get response from search-request
def response(resp):
    if resp.status_code == 404:
        return []

    results = []
    api_result = loads(resp.text)

    # skip disambiguation pages (and error payloads without a 'type' field)
    if api_result.get('type') != 'standard':
        return []

    title = api_result['title']
    wikipedia_link = api_result['content_urls']['desktop']['page']

    # a plain link result plus an infobox carrying the extract and thumbnail
    results.append({'url': wikipedia_link, 'title': title})
    results.append({'infobox': title,
                    'id': wikipedia_link,
                    'content': api_result.get('extract', ''),
                    'img_src': api_result.get('thumbnail', {}).get('source'),
                    'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}]})

    return results
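
# For reference, the REST v1 summary payload consumed above looks roughly like:
#   {"type": "standard",
#    "title": "Albert Einstein",
#    "extract": "Albert Einstein was a German-born theoretical physicist ...",
#    "thumbnail": {"source": "https://upload.wikimedia.org/..."},
#    "content_urls": {"desktop": {"page": "https://en.wikipedia.org/wiki/Albert_Einstein"}}}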


# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = {}
    dom = fromstring(resp.text)
    tables = dom.xpath('//table[contains(@class,"sortable")]')
    for table in tables:
        # exclude header row
        trs = table.xpath('.//tr')[1:]
        for tr in trs:
            td = tr.xpath('./td')
            code = td[3].xpath('./a')[0].text
            name = td[2].xpath('./a')[0].text
            english_name = td[1].xpath('./a')[0].text
            articles = int(td[4].xpath('./a/b')[0].text.replace(',', ''))
            # exclude languages with too few articles
            if articles >= 100:
                supported_languages[code] = {"name": name, "english_name": english_name,
                                             "articles": articles}

    return supported_languages
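

# --- standalone smoke test --------------------------------------------------
# A minimal sketch for trying the engine outside searx. It assumes the
# `requests` library is available (it is not an engine dependency) and stubs
# the two module attributes that searx's engine loader would normally inject.
if __name__ == '__main__':
    import requests
    from pprint import pprint

    # stubbed; real values come from searx at startup
    supported_languages = {'en': {'name': 'English', 'english_name': 'English'}}
    language_aliases = {}

    params = request('Monty Python', {'language': 'en-US', 'headers': {}})
    resp = requests.get(params['url'], headers=params['headers'])
    pprint(response(resp))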