"""
 Subtitleseeker (Video)

 @website     http://www.subtitleseeker.com
 @provide-api no

 @using-api   no
 @results     HTML
 @stable      no (HTML can change)
 @parse       url, title, content
"""

from lxml import html
from searx.languages import language_codes
from searx.engines.xpath import extract_text
from searx.url_utils import quote_plus

# engine dependent config
categories = ['videos']
paging = True
language = ""

# search-url
url = 'http://www.subtitleseeker.com/'
search_url = url + 'search/TITLES/{query}?p={pageno}'

# specific xpath variables
results_xpath = '//div[@class="boxRows"]'


# do search-request
def request(query, params):
    params['url'] = search_url.format(query=quote_plus(query),
                                      pageno=params['pageno'])

    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    search_lang = ""

    # dirty fix for languages named differently on their site
    if resp.search_params['language'][:2] == 'fa':
        search_lang = 'Farsi'
    elif resp.search_params['language'] == 'pt-BR':
        search_lang = 'Brazilian'
    else:
        # look up the language name searx uses for the requested code;
        # leave search_lang empty (no language suffix) when nothing matches
        matches = [lc[3]
                   for lc in language_codes
                   if lc[0].split('-')[0] == resp.search_params['language'].split('-')[0]]
        if matches:
            search_lang = matches[0].split(' (')[0]

    # parse results
    for result in dom.xpath(results_xpath):
        link = result.xpath(".//a")[0]
        href = link.attrib.get('href')

        # append the language as a path segment so the link points to
        # subtitles in the requested language
        if language != "":
            href = href + language + '/'
        elif search_lang:
            href = href + search_lang + '/'

        title = extract_text(link)

        content = extract_text(result.xpath('.//div[contains(@class,"red")]'))
        content = content + " - "
        text = extract_text(result.xpath('.//div[contains(@class,"grey-web")]')[0])
        content = content + text

        if result.xpath(".//span"):
            content = content +\
                " - (" +\
                extract_text(result.xpath(".//span")) +\
                ")"

        # append result
        results.append({'url': href,
                        'title': title,
                        'content': content})

    # return results
    return results
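
# --------------------------------------------------------------------------
# Illustrative smoke test (not part of the engine, only a minimal sketch).
# It shows how searx drives an engine module: request() fills in the search
# URL, response() parses the returned HTML into result dicts.  FakeResponse
# and the embedded HTML snippet are invented for this example; they merely
# mirror the xpaths used above and are not real subtitleseeker.com markup.
# Running it assumes a searx checkout where searx.languages,
# searx.engines.xpath and searx.url_utils are importable.
if __name__ == '__main__':
    class FakeResponse(object):
        text = ('<html><body>'
                '<div class="boxRows">'
                '<a href="http://www.subtitleseeker.com/1234/example-movie/">'
                'Example Movie</a>'
                '<div class="red">12 subtitles</div>'
                '<div class="grey-web">2016</div>'
                '<span>DVDRip</span>'
                '</div>'
                '</body></html>')
        search_params = {'language': 'en-US'}

    # build the outgoing request parameters
    params = request('example movie', {'pageno': 1})
    print(params['url'])

    # parse the fake response and print the extracted results
    for r in response(FakeResponse()):
        print(r['url'] + ' | ' + r['title'] + ' | ' + r['content'])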
 |