# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 INA (Videos)
"""

from json import loads
from html import unescape
from urllib.parse import urlencode
from lxml import html
from dateutil import parser

from searx.utils import extract_text

# about
about = {
    "website": 'https://www.ina.fr/',
    "wikidata_id": 'Q1665109',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['videos']
paging = True
page_size = 48

# search-url
base_url = 'https://www.ina.fr'
search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}'

# specific xpath variables
results_xpath = '//div[contains(@class,"search-results--list")]//div[@class="media-body"]'
url_xpath = './/a/@href'
title_xpath = './/h3[@class="h3--title media-heading"]'
thumbnail_xpath = './/img/@src'
publishedDate_xpath = './/span[@class="broadcast"]'
content_xpath = './/p[@class="media-body__summary"]'


# do search-request
def request(query, params):
    """Build the INA search URL for *query* and the requested page.

    Paging uses an absolute result offset (``b=``) computed from
    ``params['pageno']`` and the engine's ``page_size``.
    """
    params['url'] = search_url.format(ps=page_size,
                                      start=params['pageno'] * page_size,
                                      query=urlencode({'q': query}))

    return params


# get response from search-request
def response(resp):
    """Parse the INA AJAX response into a list of video result dicts.

    The endpoint returns an HTML fragment wrapped in a JSON string, so the
    body is JSON-decoded first and the resulting HTML is parsed with lxml.
    """
    results = []

    # we get html in a JSON container...
    response = loads(resp.text)
    dom = html.fromstring(response)

    # parse results
    for result in dom.xpath(results_xpath):
        videoid = result.xpath(url_xpath)[0]
        url = base_url + videoid
        title = unescape(extract_text(result.xpath(title_xpath)))

        # thumbnail is optional in the markup; indexing an empty xpath
        # result raises IndexError (was a bare ``except:`` which also
        # swallowed KeyboardInterrupt/SystemExit)
        try:
            thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
        except IndexError:
            thumbnail = ''
        if thumbnail and thumbnail[0] == '/':
            # relative thumbnail path -> absolute URL
            thumbnail = base_url + thumbnail

        # broadcast date is rendered as DD/MM/YYYY; rebuild it as
        # YYYY-MM-DD so dateutil cannot misread day/month order
        d = extract_text(result.xpath(publishedDate_xpath)[0])
        d = d.split('/')
        # force ISO date to avoid wrong parsing
        d = "%s-%s-%s" % (d[2], d[1], d[0])
        publishedDate = parser.parse(d)

        content = extract_text(result.xpath(content_xpath))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content,
                        'template': 'videos.html',
                        'publishedDate': publishedDate,
                        'thumbnail': thumbnail})

    # return results
    return results
 |