""" Yacy (Web, Images, Videos, Music, Files)"""from json import loadsfrom dateutil import parserfrom urllib.parse import urlencodefrom httpx import DigestAuthfrom searx.utils import html_to_textabout = {    "website": 'https://yacy.net/',    "wikidata_id": 'Q1759675',    "official_api_documentation": 'https://wiki.yacy.net/index.php/Dev:API',    "use_official_api": True,    "require_api_key": False,    "results": 'JSON',}categories = ['general', 'images']  paging = Truenumber_of_results = 5http_digest_auth_user = ""http_digest_auth_pass = ""base_url = 'http://localhost:8090'search_url = (    '/yacysearch.json?{query}'    '&startRecord={offset}'    '&maximumRecords={limit}'    '&contentdom={search_type}'    '&resource=global')search_types = {'general': 'text', 'images': 'image', 'files': 'app', 'music': 'audio', 'videos': 'video'}def request(query, params):    offset = (params['pageno'] - 1) * number_of_results    search_type = search_types.get(params.get('category'), '0')    params['url'] = base_url + search_url.format(        query=urlencode({'query': query}), offset=offset, limit=number_of_results, search_type=search_type    )    if http_digest_auth_user and http_digest_auth_pass:        params['auth'] = DigestAuth(http_digest_auth_user, http_digest_auth_pass)        if params['language'] != 'all':        params['url'] += '&lr=lang_' + params['language'].split('-')[0]    return paramsdef response(resp):    results = []    raw_search_results = loads(resp.text)        if not raw_search_results:        return []    search_results = raw_search_results.get('channels', [])    if len(search_results) == 0:        return []    for result in search_results[0].get('items', []):                if resp.search_params.get('category') == 'images':            result_url = ''            if 'url' in result:                result_url = result['url']            elif 'link' in result:                result_url = result['link']            else:                continue                   
     results.append(                {                    'url': result_url,                    'title': result['title'],                    'content': '',                    'img_src': result['image'],                    'template': 'images.html',                }            )                else:            publishedDate = parser.parse(result['pubDate'])                        results.append(                {                    'url': result['link'],                    'title': result['title'],                    'content': html_to_text(result['description']),                    'publishedDate': publishedDate,                }            )            return results