# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Qwant (Web, Images, News, Social)
"""

from datetime import datetime
from json import loads
from urllib.parse import urlencode

from searx.utils import html_to_text, match_language
from searx.exceptions import SearxEngineAPIException, SearxEngineCaptchaException
from searx.network import raise_for_httperror

# about
about = {
    "website": 'https://www.qwant.com/',
    "wikidata_id": 'Q14657870',
    "official_api_documentation": None,
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

# engine dependent config
categories = []
paging = True
supported_languages_url = about['website']

category_to_keyword = {'general': 'web',
                       'images': 'images',
                       'news': 'news'}

# search-url
url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}&t={keyword}&uiv=4'


# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10

    if categories and categories[0] in category_to_keyword:
        params['url'] = url.format(keyword=category_to_keyword[categories[0]],
                                   query=urlencode({'q': query}),
                                   offset=offset)
    else:
        params['url'] = url.format(keyword='web',
                                   query=urlencode({'q': query}),
                                   offset=offset)

    # add language tag
    # (supported_languages and language_aliases are injected into this module
    # by the searx engine loader before request() is called)
    if params['language'] != 'all':
        language = match_language(params['language'], supported_languages, language_aliases)
        params['url'] += '&locale=' + language.replace('-', '_').lower()

    params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0'
    params['raise_for_httperror'] = False
    return params


# get response from search-request
def response(resp):
    results = []

    # According to https://www.qwant.com/js/app.js
    if resp.status_code == 429:
        raise SearxEngineCaptchaException()

    # raise for other errors
    raise_for_httperror(resp)

    # load JSON result
    search_results = loads(resp.text)

    # check for an API error
    if search_results.get('status') != 'success':
        raise SearxEngineAPIException('API error ' + str(search_results.get('error', '')))

    # return empty array if there are no results
    if 'data' not in search_results:
        return []

    data = search_results.get('data', {})
    res = data.get('result', {})

    # parse results
    for result in res.get('items', []):
        title = html_to_text(result['title'])
        res_url = result['url']
        content = html_to_text(result['desc'])

        if category_to_keyword.get(categories[0], '') == 'web':
            results.append({'title': title,
                            'content': content,
                            'url': res_url})

        elif category_to_keyword.get(categories[0], '') == 'images':
            thumbnail_src = result['thumbnail']
            img_src = result['media']
            results.append({'template': 'images.html',
                            'url': res_url,
                            'title': title,
                            'content': '',
                            'thumbnail_src': thumbnail_src,
                            'img_src': img_src})

        elif category_to_keyword.get(categories[0], '') == 'news':
            published_date = datetime.fromtimestamp(result['date'], None)
            media = result.get('media', [])
            if len(media) > 0:
                img_src = media[0].get('pict', {}).get('url', None)
            else:
                img_src = None
            results.append({'url': res_url,
                            'title': title,
                            'publishedDate': published_date,
                            'content': content,
                            'img_src': img_src})

    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
    # list of regions is embedded in page as a js object
    response_text = resp.text
    response_text = response_text[response_text.find('INITIAL_PROPS'):]
    response_text = response_text[response_text.find('{'):response_text.find('</script>')]

    regions_json = loads(response_text)

    supported_languages = []
    for country, langs in regions_json['locales'].items():
        for lang in langs['langs']:
            lang_code = "{lang}-{country}".format(lang=lang, country=country)
            supported_languages.append(lang_code)

    return supported_languages
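

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the engine interface: it shows
# roughly how the searx core drives request(), response() and
# _fetch_supported_languages(), using a hand-built `params` dict and fake
# response objects. The JSON/HTML payloads below are minimal stand-ins for
# real Qwant replies, not the actual wire format. Running it still needs a
# searx checkout on PYTHONPATH for the imports above, and it assumes
# raise_for_httperror() leaves non-error (2xx) responses alone.
if __name__ == '__main__':
    from types import SimpleNamespace

    categories.append('general')  # normally filled in by the engine loader

    # build the outgoing request the way the searx core would
    params = request('test query', {'pageno': 1, 'language': 'all', 'headers': {}})
    print(params['url'])

    # feed a minimal fake API reply through response()
    fake_json = ('{"status": "success", "data": {"result": {"items": '
                 '[{"title": "t", "url": "https://example.org", "desc": "d"}]}}}')
    print(response(SimpleNamespace(status_code=200, text=fake_json)))

    # exercise the locale scraper with a fake page snippet shaped the way
    # _fetch_supported_languages() expects (INITIAL_PROPS js object)
    fake_page = ('window.INITIAL_PROPS = {"locales": '
                 '{"fr": {"langs": ["fr"]}, "gb": {"langs": ["en"]}}}</script>')
    print(_fetch_supported_languages(SimpleNamespace(text=fake_page)))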