# qwant.py
  1. """
  2. Qwant (Web, Images, News, Social)
  3. @website https://qwant.com/
  4. @provide-api not officially (https://api.qwant.com/api/search/)
  5. @using-api yes
  6. @results JSON
  7. @stable yes
  8. @parse url, title, content
  9. """
  10. from datetime import datetime
  11. from json import loads
  12. from urllib.parse import urlencode
  13. from searx.utils import html_to_text, match_language
  14. from searx.exceptions import SearxEngineAPIException, SearxEngineCaptchaException
  15. from searx.raise_for_httperror import raise_for_httperror
  16. # engine dependent config
  17. categories = []
  18. paging = True
  19. language_support = True
  20. supported_languages_url = 'https://qwant.com/region'
  21. category_to_keyword = {'general': 'web',
  22. 'images': 'images',
  23. 'news': 'news'}
  24. # search-url
  25. url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}&t={keyword}&uiv=4'
  26. # do search-request
  27. def request(query, params):
  28. offset = (params['pageno'] - 1) * 10
  29. if categories[0] and categories[0] in category_to_keyword:
  30. params['url'] = url.format(keyword=category_to_keyword[categories[0]],
  31. query=urlencode({'q': query}),
  32. offset=offset)
  33. else:
  34. params['url'] = url.format(keyword='web',
  35. query=urlencode({'q': query}),
  36. offset=offset)
  37. # add language tag
  38. if params['language'] != 'all':
  39. language = match_language(params['language'], supported_languages, language_aliases)
  40. params['url'] += '&locale=' + language.replace('-', '_').lower()
  41. params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0'
  42. params['raise_for_httperror'] = False
  43. return params
  44. # get response from search-request
  45. def response(resp):
  46. results = []
  47. # According to https://www.qwant.com/js/app.js
  48. if resp.status_code == 429:
  49. raise SearxEngineCaptchaException()
  50. # raise for other errors
  51. raise_for_httperror(resp)
  52. # load JSON result
  53. search_results = loads(resp.text)
  54. # check for an API error
  55. if search_results.get('status') != 'success':
  56. raise SearxEngineAPIException('API error ' + str(search_results.get('error', '')))
  57. # return empty array if there are no results
  58. if 'data' not in search_results:
  59. return []
  60. data = search_results.get('data', {})
  61. res = data.get('result', {})
  62. # parse results
  63. for result in res.get('items', {}):
  64. title = html_to_text(result['title'])
  65. res_url = result['url']
  66. content = html_to_text(result['desc'])
  67. if category_to_keyword.get(categories[0], '') == 'web':
  68. results.append({'title': title,
  69. 'content': content,
  70. 'url': res_url})
  71. elif category_to_keyword.get(categories[0], '') == 'images':
  72. thumbnail_src = result['thumbnail']
  73. img_src = result['media']
  74. results.append({'template': 'images.html',
  75. 'url': res_url,
  76. 'title': title,
  77. 'content': '',
  78. 'thumbnail_src': thumbnail_src,
  79. 'img_src': img_src})
  80. elif category_to_keyword.get(categories[0], '') == 'news':
  81. published_date = datetime.fromtimestamp(result['date'], None)
  82. media = result.get('media', [])
  83. if len(media) > 0:
  84. img_src = media[0].get('pict', {}).get('url', None)
  85. else:
  86. img_src = None
  87. results.append({'url': res_url,
  88. 'title': title,
  89. 'publishedDate': published_date,
  90. 'content': content,
  91. 'img_src': img_src})
  92. return results
  93. # get supported languages from their site
  94. def _fetch_supported_languages(resp):
  95. # list of regions is embedded in page as a js object
  96. response_text = resp.text
  97. response_text = response_text[response_text.find('regionalisation'):]
  98. response_text = response_text[response_text.find('{'):response_text.find(');')]
  99. regions_json = loads(response_text)
  100. supported_languages = {}
  101. for lang in regions_json['languages'].values():
  102. for country in lang['countries']:
  103. lang_code = "{lang}-{country}".format(lang=lang['code'], country=country)
  104. supported_languages[lang_code] = {'name': lang['name']}
  105. return supported_languages