#  INA (Videos)
#
# @website     https://www.ina.fr/
# @provide-api no
#
# @using-api   no
# @results     HTML (using search portal)
# @stable      no (HTML can change)
# @parse       url, title, content, publishedDate, thumbnail
#
# @todo        set content-parameter with correct data
# @todo        embedded (needs some md5 from video page)

from json import loads
from urllib import urlencode
from lxml import html
from HTMLParser import HTMLParser
from searx.engines.xpath import extract_text
from dateutil import parser

# engine dependent config
categories = ['videos']
paging = True
page_size = 48

# search-url
base_url = 'https://www.ina.fr'
search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}'

# specific xpath variables
results_xpath = '//div[contains(@class,"search-results--list")]/div[@class="media"]'
url_xpath = './/a/@href'
title_xpath = './/h3[@class="h3--title media-heading"]'
thumbnail_xpath = './/img/@src'
publishedDate_xpath = './/span[@class="broadcast"]'
content_xpath = './/p[@class="media-body__summary"]'


# do search-request
def request(query, params):
    params['url'] = search_url.format(ps=page_size,
                                      start=params['pageno'] * page_size,
                                      query=urlencode({'q': query}))

    return params


# get response from search-request
def response(resp):
    results = []

    # we get html in a JSON container...
    response = loads(resp.text)
    if "content" not in response:
        return []
    dom = html.fromstring(response["content"])
    p = HTMLParser()

    # parse results
    for result in dom.xpath(results_xpath):
        videoid = result.xpath(url_xpath)[0]
        url = base_url + videoid

        title = p.unescape(extract_text(result.xpath(title_xpath)))

        thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
        if thumbnail[0] == '/':
            thumbnail = base_url + thumbnail

        d = extract_text(result.xpath(publishedDate_xpath)[0])
        d = d.split('/')
        # force ISO date to avoid wrong parsing
        d = "%s-%s-%s" % (d[2], d[1], d[0])
        publishedDate = parser.parse(d)

        content = extract_text(result.xpath(content_xpath))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content,
                        'template': 'videos.html',
                        'publishedDate': publishedDate,
                        'thumbnail': thumbnail})

    # return results
    return results
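

# --- usage sketch (illustration only, not part of the engine) ---------------
# A minimal sketch of how searx drives this engine: it calls request() to
# build the search URL for a query and page number, performs the HTTP request
# itself, and then hands the raw response object to response() for parsing.
# The FakeResponse class below is a hypothetical stand-in used only to show
# the expected interface (an object with a .text attribute holding the JSON
# body returned by INA); the real object comes from searx's HTTP layer.
if __name__ == '__main__':
    # build the request URL for page 1 of a sample query
    params = request('jacques brel', {'pageno': 1})
    print(params['url'])

    class FakeResponse(object):
        # JSON container with an empty result list, matching the expected shape
        text = '{"content": "<div class=\\"search-results--list\\"></div>"}'

    # parse the (empty) results from the fake payload
    print(response(FakeResponse()))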