# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Doku Wiki
"""

from urllib.parse import urlencode
from urllib.parse import urljoin
from lxml.html import fromstring
from searx.utils import extract_text, eval_xpath

# about
about = {
    "website": 'https://www.dokuwiki.org/',
    "wikidata_id": 'Q851864',
    "official_api_documentation": 'https://www.dokuwiki.org/devel:xmlrpc',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general']  # 'images', 'music', 'videos', 'files'
paging = False
number_of_results = 5

# search-url
# Doku is OpenSearch compatible
base_url = 'http://localhost:8090'
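# NOTE: the default above points at a local DokuWiki instance; for a real
# setup, base_url is expected to be overridden in the engine's settings so
# that it points at the wiki to be searched.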
search_url = (
    # fmt: off
    '/?do=search'
    '&{query}'
    # fmt: on
)
# '&startRecord={offset}'
# '&maximumRecords={limit}'
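# startRecord / maximumRecords are OpenSearch-style paging parameters; they
# are left out of search_url because paging is disabled for this engine
# (paging = False above).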


# do search-request
def request(query, params):
    params['url'] = base_url + search_url.format(query=urlencode({'id': query}))

    return params
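# Illustrative example: with the default base_url above, the query
# "wiki syntax" produces the request URL
#   http://localhost:8090/?do=search&id=wiki+syntax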


# get response from search-request
def response(resp):
    results = []

    doc = fromstring(resp.text)

    # parse results
    # Quickhits
    for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
        try:
            res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
        except:  # pylint: disable=bare-except
            continue

        if not res_url:
            continue

        title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))

        # append result
        results.append({'title': title, 'content': "", 'url': urljoin(base_url, res_url)})
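
    # The fulltext hits are rendered as a definition list: each <dt> holds
    # the page link (URL and title) and the following <dd> holds the
    # matching snippet, so a result is only appended once its <dd> is seen.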
    # Search results
    for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
        try:
            if r.tag == "dt":
                res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
                title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
            elif r.tag == "dd":
                content = extract_text(eval_xpath(r, '.'))

                # append result
                results.append({'title': title, 'content': content, 'url': urljoin(base_url, res_url)})
        except:  # pylint: disable=bare-except
            continue

        if not res_url:
            continue

    # return results
    return results
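

# Minimal usage sketch (illustrative only, not part of the engine): the
# FakeResp stand-in and its HTML below are assumptions, used to show the
# request/response round trip that searx performs with this module.
if __name__ == '__main__':
    class FakeResp:  # stand-in for the HTTP response object searx passes in
        text = (
            '<dl class="search_results">'
            '<dt><a class="wikilink1" href="/doku.php?id=start" title="start">start</a></dt>'
            '<dd>matching snippet from the page ...</dd>'
            '</dl>'
        )

    req = request('wiki syntax', {})
    print(req['url'])  # http://localhost:8090/?do=search&id=wiki+syntax

    for res in response(FakeResp()):
        print(res['title'], res['url'])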