seznam.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Seznam"""

from urllib.parse import urlencode
from lxml import html
from searx.network import get
from searx.exceptions import SearxEngineAccessDeniedException
from searx.utils import (
    extract_text,
    eval_xpath_list,
    eval_xpath_getindex,
)

# about
about = {
    "website": "https://www.seznam.cz/",
    "wikidata_id": "Q3490485",
    "official_api_documentation": "https://api.sklik.cz/",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
    "language": "cz",
}

categories = ['general', 'web']
base_url = 'https://search.seznam.cz/'


def request(query, params):
    # fetch the landing page first: its hidden form fields and session
    # cookies are required for the actual search request to succeed
    response_index = get(base_url, headers=params['headers'], raise_for_httperror=True)
    dom = html.fromstring(response_index.text)

    url_params = {
        'q': query,
        'oq': query,
    }
    # copy every hidden input from the landing page into the query string
    for e in eval_xpath_list(dom, '//input[@type="hidden"]'):
        name = e.get('name')
        value = e.get('value')
        url_params[name] = value

    params['url'] = base_url + '?' + urlencode(url_params)
    params['cookies'] = response_index.cookies
    return params


def response(resp):
    # a redirect to Seznam's verification page means the request was blocked
    if resp.url.path.startswith('/verify'):
        raise SearxEngineAccessDeniedException()

    results = []

    dom = html.fromstring(resp.content.decode())
    for result_element in eval_xpath_list(
        dom, '//div[@id="searchpage-root"]//div[@class="Layout--left"]/div[@class="f2c528"]'
    ):
        result_data = eval_xpath_getindex(
            result_element, './/div[@class="c8774a" or @class="e69e8d a11657"]', 0, default=None
        )
        if result_data is None:
            continue
        title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
        results.append(
            {
                'url': title_element.get('href'),
                'title': extract_text(title_element),
                'content': extract_text(result_data),
            }
        )

    return results
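
For illustration, here is a self-contained sketch of the hidden-input technique that request() relies on: parse a landing page, copy every input of type "hidden" into the query parameters, and build the final search URL. The HTML snippet and the field names (sourceid, sgId) are made up for demonstration; the live search.seznam.cz page serves different fields.

from urllib.parse import urlencode
from lxml import html

# made-up landing page; the real markup differs
page = html.fromstring(
    '<form>'
    '<input type="hidden" name="sourceid" value="top"/>'
    '<input type="hidden" name="sgId" value="abc123"/>'
    '</form>'
)

url_params = {'q': 'praha', 'oq': 'praha'}
for e in page.xpath('//input[@type="hidden"]'):
    url_params[e.get('name')] = e.get('value')

print('https://search.seznam.cz/?' + urlencode(url_params))
# https://search.seznam.cz/?q=praha&oq=praha&sourceid=top&sgId=abc123

In the engine itself, SearXNG performs the HTTP GET described by params['url'] and params['cookies'], then hands the reply to response(), which returns a list of dicts with 'url', 'title', and 'content' keys.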