"""
 Duden
 @website     https://www.duden.de
 @provide-api no
 @using-api   no
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content
"""

import re

from lxml import html, etree

from searx import logger
from searx.engines.xpath import extract_text
from searx.url_utils import quote, urljoin
from searx.utils import eval_xpath

# engine configuration
categories = ['general']
paging = True
language_support = False

# search-url
base_url = 'https://www.duden.de/'
search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'


def request(query, params):
    '''pre-request callback

    params<dict>:
      method  : POST/GET
      headers : {}
      data    : {} # if method == POST
      url     : ''
      category: 'search category'
      pageno  : 1 # number of the requested page
    '''
    offset = (params['pageno'] - 1)
    if offset == 0:
        # the first result page has no "page" query parameter
        search_url_fmt = base_url + 'suchen/dudenonline/{query}'
        params['url'] = search_url_fmt.format(query=quote(query))
    else:
        params['url'] = search_url.format(offset=offset, query=quote(query))

    return params


def response(resp):
    '''post-response callback

    resp: requests response object

    Returns a list of result dicts (url/title/content), optionally
    preceded by a {'number_of_results': int} entry.
    '''
    results = []

    dom = html.fromstring(resp.text)

    try:
        # the active pager link carries the total hit count; strip all
        # non-digit characters before parsing it as an integer
        number_of_results_string =\
            re.sub(r'[^0-9]', '',
                   eval_xpath(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0])

        results.append({'number_of_results': int(number_of_results_string)})
    # best-effort: a missing/unparseable count is not fatal, but do not
    # swallow SystemExit/KeyboardInterrupt with a bare except
    except Exception:
        logger.debug("Couldn't read number of results.")

    for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
        try:
            url = eval_xpath(result, './/h2/a')[0].get('href')
            url = urljoin(base_url, url)
            title = eval_xpath(result, 'string(.//h2/a)').strip()
            content = extract_text(eval_xpath(result, './/p'))
            # append result
            results.append({'url': url,
                            'title': title,
                            'content': content})
        # skip malformed result sections instead of failing the whole page
        except Exception:
            logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
            continue

    return results