""" 1337x"""from urllib.parse import quote, urljoinfrom lxml import htmlfrom searx.utils import extract_text, get_torrent_size, eval_xpath, eval_xpath_list, eval_xpath_getindexabout = {    "website": 'https://1337x.to/',    "wikidata_id": 'Q28134166',    "official_api_documentation": None,    "use_official_api": False,    "require_api_key": False,    "results": 'HTML',}url = 'https://1337x.to/'search_url = url + 'search/{search_term}/{pageno}/'categories = ['files']paging = Truedef request(query, params):    params['url'] = search_url.format(search_term=quote(query), pageno=params['pageno'])    return paramsdef response(resp):    results = []    dom = html.fromstring(resp.text)    for result in eval_xpath_list(dom, '//table[contains(@class, "table-list")]/tbody//tr'):        href = urljoin(url, eval_xpath_getindex(result, './td[contains(@class, "name")]/a[2]/@href', 0))        title = extract_text(eval_xpath(result, './td[contains(@class, "name")]/a[2]'))        seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]'))        leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]'))        filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()'))        filesize, filesize_multiplier = filesize_info.split()        filesize = get_torrent_size(filesize, filesize_multiplier)        results.append(            {                'url': href,                'title': title,                'seed': seed,                'leech': leech,                'filesize': filesize,                'template': 'torrent.html',            }        )    return results