
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Słownik Języka Polskiego (general)

"""

from lxml.html import fromstring

from searx import logger
from searx.utils import extract_text
from searx.network import raise_for_httperror

logger = logger.getChild('sjp engine')

# about
about = {
    "website": 'https://sjp.pwn.pl',
    "wikidata_id": 'Q55117369',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
    "language": 'pl',
}
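
# The mapping above is the standard SearXNG engine "about" metadata; searx uses it
# to describe the engine (website, Wikidata id, result type, language), not to
# build the actual request.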
categories = ['dictionaries']
paging = False

URL = 'https://sjp.pwn.pl'
SEARCH_URL = URL + '/szukaj/{query}.html'

word_xpath = '//div[@class="query"]'
dict_xpath = [
    '//div[@class="wyniki sjp-so-wyniki sjp-so-anchor"]',
    '//div[@class="wyniki sjp-wyniki sjp-anchor"]',
    '//div[@class="wyniki sjp-doroszewski-wyniki sjp-doroszewski-anchor"]',
]
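
# Each entry in dict_xpath selects one dictionary block on the PWN result page.
# Judging by the CSS class names these appear to be separate PWN dictionaries
# (e.g. the main SJP and the Doroszewski dictionary); this is an assumption
# read off the markup, not taken from any PWN documentation.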


def request(query, params):
    params['url'] = SEARCH_URL.format(query=query)
    logger.debug(f"query_url --> {params['url']}")
    return params
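
# For example, a query of 'kot' sets
#   params['url'] == 'https://sjp.pwn.pl/szukaj/kot.html'
# Note that the query is interpolated as-is, without additional URL encoding.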


def response(resp):
    results = []

    raise_for_httperror(resp)
    dom = fromstring(resp.text)

    # the looked-up headword shown at the top of the result page
    word = extract_text(dom.xpath(word_xpath))

    definitions = []

    for dict_src in dict_xpath:
        for src in dom.xpath(dict_src):
            # title of the dictionary this block comes from
            src_text = extract_text(src.xpath('.//span[@class="entry-head-title"]/text()')).strip()

            src_defs = []
            for def_item in src.xpath('.//div[contains(@class, "ribbon-element")]'):
                if def_item.xpath('./div[@class="znacz"]'):
                    # numbered sub-definitions: strip the leading "1." style
                    # numbering and group them under the headword
                    sub_defs = []
                    for def_sub_item in def_item.xpath('./div[@class="znacz"]'):
                        def_sub_text = extract_text(def_sub_item).lstrip('0123456789. ')
                        sub_defs.append(def_sub_text)
                    src_defs.append((word, sub_defs))
                else:
                    # single definition; link it when it points to the Doroszewski dictionary
                    def_text = extract_text(def_item).strip()
                    def_link = def_item.xpath('./span/a/@href')
                    if def_link and 'doroszewski' in def_link[0]:
                        def_text = f"<a href='{def_link[0]}'>{def_text}</a>"
                    src_defs.append((def_text, ''))

            definitions.append((src_text, src_defs))

    if not definitions:
        return results

    # assemble the infobox HTML: one <div> per dictionary source,
    # definitions as a list, sub-definitions as a nested ordered list
    infobox = ''
    for src in definitions:
        infobox += f"<div><small>{src[0]}</small>"
        infobox += "<ul>"
        for def_text, sub_def in src[1]:
            infobox += f"<li>{def_text}</li>"
            if sub_def:
                infobox += "<ol>"
                for sub_def_text in sub_def:
                    infobox += f"<li>{sub_def_text}</li>"
                infobox += "</ol>"
        infobox += "</ul></div>"

    results.append(
        {
            'infobox': word,
            'content': infobox,
        }
    )

    return results
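
# The engine therefore returns a single infobox result: 'infobox' carries the
# looked-up word and 'content' carries HTML of the form
#   <div><small>dictionary title</small><ul><li>definition</li><ol><li>...</li></ol></ul></div>
# repeated once per dictionary block found on the page.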