duden.py 2.2 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879
  1. """
  2. Duden
  3. @website https://www.duden.de
  4. @provide-api no
  5. @using-api no
  6. @results HTML (using search portal)
  7. @stable no (HTML can change)
  8. @parse url, title, content
  9. """
  10. from lxml import html, etree
  11. import re
  12. from urllib.parse import quote, urljoin
  13. from searx.utils import extract_text, eval_xpath
  14. from searx import logger
# engine configuration (searx engine attributes)
categories = ['general']        # result categories this engine serves
paging = True                   # pagination supported via the `page` URL parameter
language_support = False        # no language parameter is forwarded to Duden
# search-url
base_url = 'https://www.duden.de/'
# {query} is URL-quoted by request(); {offset} is the zero-based page index
search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'
  21. def request(query, params):
  22. '''pre-request callback
  23. params<dict>:
  24. method : POST/GET
  25. headers : {}
  26. data : {} # if method == POST
  27. url : ''
  28. category: 'search category'
  29. pageno : 1 # number of the requested page
  30. '''
  31. offset = (params['pageno'] - 1)
  32. if offset == 0:
  33. search_url_fmt = base_url + 'suchen/dudenonline/{query}'
  34. params['url'] = search_url_fmt.format(query=quote(query))
  35. else:
  36. params['url'] = search_url.format(offset=offset, query=quote(query))
  37. return params
  38. def response(resp):
  39. '''post-response callback
  40. resp: requests response object
  41. '''
  42. results = []
  43. dom = html.fromstring(resp.text)
  44. try:
  45. number_of_results_string =\
  46. re.sub('[^0-9]', '',
  47. eval_xpath(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0])
  48. results.append({'number_of_results': int(number_of_results_string)})
  49. except:
  50. logger.debug("Couldn't read number of results.")
  51. pass
  52. for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
  53. try:
  54. url = eval_xpath(result, './/h2/a')[0].get('href')
  55. url = urljoin(base_url, url)
  56. title = eval_xpath(result, 'string(.//h2/a)').strip()
  57. content = extract_text(eval_xpath(result, './/p'))
  58. # append result
  59. results.append({'url': url,
  60. 'title': title,
  61. 'content': content})
  62. except:
  63. logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
  64. continue
  65. return results