duden.py

  1. """
  2. Duden
  3. @website https://www.duden.de
  4. @provide-api no
  5. @using-api no
  6. @results HTML (using search portal)
  7. @stable no (HTML can change)
  8. @parse url, title, content
  9. """
import re
from urllib.parse import quote, urljoin

from lxml import html, etree

from searx.engines.xpath import extract_text
from searx.utils import eval_xpath
from searx import logger

categories = ['general']
paging = True
language_support = False

# search-url
base_url = 'https://www.duden.de/'
search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'


def request(query, params):
    '''pre-request callback

    params<dict>:
        method  : POST/GET
        headers : {}
        data    : {}  # if method == POST
        url     : ''
        category: 'search category'
        pageno  : 1   # number of the requested page
    '''
    offset = params['pageno'] - 1
    if offset == 0:
        search_url_fmt = base_url + 'suchen/dudenonline/{query}'
        params['url'] = search_url_fmt.format(query=quote(query))
    else:
        params['url'] = search_url.format(offset=offset, query=quote(query))

    return params
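
# Illustrative only (not executed by searx): the URLs request() would build
# for the assumed query 'wolke' --
#   pageno 1 -> https://www.duden.de/suchen/dudenonline/wolke
#   pageno 2 -> https://www.duden.de/suchen/dudenonline/wolke?search_api_fulltext=&page=1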


def response(resp):
    '''post-response callback

    resp: requests response object
    '''
    results = []

    dom = html.fromstring(resp.text)

    try:
        number_of_results_string = \
            re.sub('[^0-9]', '',
                   eval_xpath(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0])
        results.append({'number_of_results': int(number_of_results_string)})
    except Exception:
        logger.debug("Couldn't read number of results.")
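    # Illustrative only: re.sub('[^0-9]', ...) above strips every non-digit,
    # so an assumed pager label such as '1.234 Treffer' would be reduced to
    # '1234' before int() parses it.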

    for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
        try:
            url = eval_xpath(result, './/h2/a')[0].get('href')
            url = urljoin(base_url, url)
            title = eval_xpath(result, 'string(.//h2/a)').strip()
            content = extract_text(eval_xpath(result, './/p'))

            # append result
            results.append({'url': url,
                            'title': title,
                            'content': content})
        except Exception:
            logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
            continue

    return results
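

# A minimal smoke test (illustrative only, not part of the searx engine).
# It feeds response() a handcrafted HTML snippet shaped like the markup the
# XPath expressions above expect; the snippet is an assumption, not a copy
# of the live Duden page.
if __name__ == '__main__':
    class FakeResponse:
        text = '''<html><body>
            <section class="vignette">
              <h2><a href="/rechtschreibung/Wolke">Wolke</a></h2>
              <p>Substantiv, feminin</p>
            </section>
          </body></html>'''

    for item in response(FakeResponse()):
        print(item)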