# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""This is the implementation of the Google Videos engine.

.. admonition:: Content-Security-Policy (CSP)

   This engine needs to allow images from the `data URLs`_ (prefixed with the
   ``data:`` scheme)::

     Header set Content-Security-Policy "img-src 'self' data: ;"

.. _data URLs:
   https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs
"""

from typing import TYPE_CHECKING

from urllib.parse import urlencode
from lxml import html

from searx.utils import (
    eval_xpath,
    eval_xpath_list,
    eval_xpath_getindex,
    extract_text,
)

from searx.engines.google import fetch_traits  # pylint: disable=unused-import
from searx.engines.google import (
    get_google_info,
    time_range_dict,
    filter_mapping,
    suggestion_xpath,
    detect_google_sorry,
)
from searx.enginelib.traits import EngineTraits

if TYPE_CHECKING:
    import logging

    logger: logging.Logger

traits: EngineTraits

# about
about = {
    "website": 'https://www.google.com',
    "wikidata_id": 'Q219885',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['videos', 'web']
paging = True
language_support = True
time_range_support = True
safesearch = True


def request(query, params):
    """Build a Google-Video search request.

    Assembles the search URL (subdomain, query, ``tbm=vid`` video filter,
    pagination, time-range and safe-search options) and copies the cookies
    and headers required by Google into *params*.

    :param query: the search term
    :param params: SearXNG request parameters; mutated in place
        (``url``, ``cookies``, ``headers``) and returned.
    """
    google_info = get_google_info(params, traits)

    query_url = (
        'https://'
        + google_info['subdomain']
        + '/search'
        + "?"
        + urlencode(
            {
                'q': query,
                # tbm=vid restricts results to videos
                'tbm': "vid",
                # Google's result offset is 0-based while params['pageno']
                # is 1-based -- page 1 must start at 0, not 10.
                'start': 10 * (params['pageno'] - 1),
                **google_info['params'],
                'asearch': 'arc',
                'async': 'use_ac:true,_fmt:html',
            }
        )
    )

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
    params['url'] = query_url

    params['cookies'] = google_info['cookies']
    params['headers'].update(google_info['headers'])
    return params


def response(resp):
    """Parse Google's video-search response into SearXNG results.

    Raises a CAPTCHA / "sorry" exception via :py:obj:`detect_google_sorry`
    when Google blocks the request.

    :param resp: the HTTP response object
    :return: list of video result dicts (``videos.html`` template) plus
        query-suggestion items.
    """
    results = []

    detect_google_sorry(resp)

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):

        # results without a thumbnail are skipped -- they are not videos
        img_src = eval_xpath_getindex(result, './/img/@src', 0, None)
        if img_src is None:
            continue

        title = extract_text(eval_xpath_getindex(result, './/a/h3[1]', 0))
        url = eval_xpath_getindex(result, './/a/h3[1]/../@href', 0)

        # NOTE(review): the class names below are Google-generated and
        # brittle; they break whenever Google changes its markup.
        c_node = eval_xpath_getindex(result, './/div[@class="ITZIwc"]', 0)
        content = extract_text(c_node)
        pub_info = extract_text(eval_xpath(result, './/div[@class="gqF9jc"]'))

        results.append(
            {
                'url': url,
                'title': title,
                'content': content,
                'author': pub_info,
                'thumbnail': img_src,
                'template': 'videos.html',
            }
        )

    # parse suggestion
    for suggestion in eval_xpath_list(dom, suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    return results