# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Google Play Apps & Google Play Movies"""

from urllib.parse import urlencode

from lxml import html

from searx.utils import (
    eval_xpath,
    extract_url,
    extract_text,
    eval_xpath_list,
    eval_xpath_getindex,
)

about = {
    "website": "https://play.google.com/",
    "wikidata_id": "Q79576",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

send_accept_language_header = True

# Selects which Play store vertical to query; set by the engine
# configuration before request() is called.
play_categ = None  # apps|movies
base_url = 'https://play.google.com'
search_url = base_url + "/store/search?{query}&c={play_categ}"


def request(query, params):
    """Build the Google Play search URL for the configured category.

    Raises ``ValueError`` when ``play_categ`` is not 'movies' or 'apps'.
    """
    if play_categ not in ('movies', 'apps'):
        raise ValueError(f"unknown google play category: {play_categ}")

    params["url"] = search_url.format(
        query=urlencode({"q": query}),
        play_categ=play_categ,
    )
    # Pre-accept Google's consent dialog; without this cookie the response
    # is a consent interstitial instead of search results.
    params['cookies']['CONSENT'] = "YES+"

    return params


def response(resp):
    """Dispatch response parsing to the category-specific parser."""
    if play_categ == 'movies':
        return response_movies(resp)
    if play_categ == 'apps':
        return response_apps(resp)
    raise ValueError(f"Unsupported play category: {play_categ}")


def response_movies(resp):
    """Parse a Google Play *movies* result page into video results."""
    results = []
    dom = html.fromstring(resp.text)

    for section in eval_xpath(dom, '//c-wiz/section/header/..'):
        sec_name = extract_text(eval_xpath(section, './header'))
        for item in eval_xpath(section, './/a'):
            url = base_url + item.get('href')
            div_1, div_2 = eval_xpath(item, './div')[:2]
            title = extract_text(eval_xpath(div_2, './div[@title]'))
            metadata = extract_text(eval_xpath(div_2, './div[@class]'))
            # Some items ship without a thumbnail: use a None default so a
            # single missing <img> does not abort parsing the whole page
            # (previously `eval_xpath(...)[0]` raised IndexError here).
            img = eval_xpath_getindex(div_1, './/img', 0, None)
            img_src = img.get('src') if img is not None else None
            results.append(
                {
                    "url": url,
                    "title": title,
                    "content": sec_name,
                    "img_src": img_src,
                    'metadata': metadata,
                    'template': 'videos.html',
                }
            )
    return results


def response_apps(resp):
    """Parse a Google Play *apps* result page into app results."""
    results = []
    dom = html.fromstring(resp.text)

    # Marker element shown on the "no results" page.
    if eval_xpath(dom, '//div[@class="v6DsQb"]'):
        return []

    # Featured ("spotlight") app rendered above the regular result list.
    spot = eval_xpath_getindex(dom, '//div[@class="ipRz4"]', 0, None)
    if spot is not None:
        url = extract_url(eval_xpath(spot, './a[@class="Qfxief"]/@href'), search_url)
        title = extract_text(eval_xpath(spot, './/div[@class="vWM94c"]'))
        content = extract_text(eval_xpath(spot, './/div[@class="LbQbAe"]'))
        img = extract_text(eval_xpath(spot, './/img[@class="T75of bzqKMd"]/@src'))
        results.append({"url": url, "title": title, "content": content, "img_src": img})

    more = eval_xpath_list(dom, '//c-wiz[@jsrenderer="RBsfwb"]//div[@role="listitem"]', min_len=1)
    for result in more:
        url = extract_url(eval_xpath(result, ".//a/@href"), search_url)
        title = extract_text(eval_xpath(result, './/span[@class="DdYX5"]'))
        content = extract_text(eval_xpath(result, './/span[@class="wMUdtb"]'))
        img = extract_text(
            eval_xpath(
                result,
                './/img[@class="T75of stzEZd" or @class="T75of etjhNc Q8CSx "]/@src',
            )
        )
        results.append({"url": url, "title": title, "content": content, "img_src": img})

    for suggestion in eval_xpath_list(dom, '//c-wiz[@jsrenderer="qyd4Kb"]//div[@class="ULeU3b neq64b"]'):
        results.append({"suggestion": extract_text(eval_xpath(suggestion, './/div[@class="Epkrse "]'))})

    return results