# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 not Evil (Onions)
"""

from urllib.parse import urlencode
from lxml import html
from searx.engines.xpath import extract_text

# about
about = {
    "website": 'http://hss3uro2hsxfogfq.onion',
    "wikidata_id": None,
    "official_api_documentation": 'http://hss3uro2hsxfogfq.onion/api.htm',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['onions']
paging = True
page_size = 20

# search-url
base_url = 'http://hss3uro2hsxfogfq.onion/'
search_url = 'index.php?{query}&hostLimit=20&start={pageno}&numRows={page_size}'

# specific xpath variables
results_xpath = '//*[@id="content"]/div/p'
url_xpath = './span[1]'
title_xpath = './a[1]'
content_xpath = './text()'


# do search-request
def request(query, params):
    # the engine's 'start' parameter counts result rows, not pages, so the
    # 1-based page number is converted to a zero-based row offset (the
    # {pageno} placeholder in search_url is filled with this offset)
    offset = (params['pageno'] - 1) * page_size

    params['url'] = base_url + search_url.format(pageno=offset,
                                                 query=urlencode({'q': query}),
                                                 page_size=page_size)

    return params


# get response from search-request
def response(resp):
    results = []

    # needed because otherwise requests guesses wrong encoding
    resp.encoding = 'utf8'

    dom = html.fromstring(resp.text)

    # parse results
    for result in dom.xpath(results_xpath):
        url = extract_text(result.xpath(url_xpath)[0])
        title = extract_text(result.xpath(title_xpath)[0])
        content = extract_text(result.xpath(content_xpath))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content,
                        'is_onion': True})

    return results
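
# ----------------------------------------------------------------------
# Minimal sketch (not part of the engine itself) of how searx drives this
# module: request() is called to build the outgoing URL, then response()
# is called on the fetched page. The params dict shape ({'pageno': ...})
# follows the searx engine convention; running this file standalone
# assumes searx is importable, because of the extract_text import above.
if __name__ == '__main__':
    demo_params = request('hidden wiki', {'pageno': 2})
    # hypothetical output:
    # http://hss3uro2hsxfogfq.onion/index.php?q=hidden+wiki&hostLimit=20&start=20&numRows=20
    print(demo_params['url'])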
 |