destatis.py 2.2 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""DeStatis

Engine scraping search results from the HTML "Expertensuche" form on
https://www.destatis.de (no official API is used).
"""
from urllib.parse import urlencode

from lxml import html

from searx.utils import eval_xpath, eval_xpath_list, extract_text

# Engine metadata (shown in the engine's "about" section).
about = {
    'website': 'https://www.destatis.de',
    'official_api_documentation': 'https://destatis.api.bund.dev/',
    'use_official_api': False,
    'require_api_key': False,
    'results': 'HTML',
    'language': 'de',
}

categories = []  # no dedicated category for this engine
paging = True    # the remote form supports page navigation (see request())

base_url = "https://www.destatis.de"
search_url = f"{base_url}/SiteGlobals/Forms/Suche/Expertensuche_Formular.html"

# XPath selecting one result container; '{extra}' is filled in by response()
# with an optional extra predicate (see results_xpath_filter_recommended).
# pylint: disable-next=line-too-long
results_xpath = '//div[contains(@class, "l-content-wrapper")]/div[contains(@class, "row")]/div[contains(@class, "column")]/div[contains(@class, "c-result"){extra}]'
# Predicate excluding "recommended" results — they are identical on every
# page, so they are filtered out on pages > 1 (see response()).
results_xpath_filter_recommended = " and not(contains(@class, 'c-result--recommended'))"

# XPaths evaluated relative to a single result container.
url_xpath = './/a/@href'
title_xpath = './/a/text()'
date_xpath = './/a/span[contains(@class, "c-result__date")]'
content_xpath = './/div[contains(@class, "column")]/p/text()'
doctype_xpath = './/div[contains(@class, "c-result__doctype")]/p'
  28. def request(query, params):
  29. args = {
  30. 'templateQueryString': query,
  31. 'gtp': f"474_list%3D{params['pageno']}",
  32. }
  33. params['url'] = f"{search_url}?{urlencode(args)}"
  34. return params
  35. def response(resp):
  36. results = []
  37. dom = html.fromstring(resp.text)
  38. # filter out suggested results on further page because they're the same on each page
  39. extra_xpath = results_xpath_filter_recommended if resp.search_params['pageno'] > 1 else ''
  40. res_xpath = results_xpath.format(extra=extra_xpath)
  41. for result in eval_xpath_list(dom, res_xpath):
  42. doctype = extract_text(eval_xpath(result, doctype_xpath))
  43. date = extract_text(eval_xpath(result, date_xpath))
  44. metadata = [meta for meta in (doctype, date) if meta != ""]
  45. results.append(
  46. {
  47. 'url': base_url + "/" + extract_text(eval_xpath(result, url_xpath)),
  48. 'title': extract_text(eval_xpath(result, title_xpath)),
  49. 'content': extract_text(eval_xpath(result, content_xpath)),
  50. 'metadata': ', '.join(metadata),
  51. }
  52. )
  53. return results