# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Doku Wiki
"""

from urllib.parse import urlencode
from lxml.html import fromstring
from searx.utils import extract_text, eval_xpath
# about
about = {
    "website": 'https://www.dokuwiki.org/',
    "wikidata_id": 'Q851864',
    "official_api_documentation": 'https://www.dokuwiki.org/devel:xmlrpc',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}
# engine dependent config
categories = ['general']  # TODO , 'images', 'music', 'videos', 'files'
paging = False
language_support = False
number_of_results = 5

# search-url
# Doku is OpenSearch compatible
base_url = 'http://localhost:8090'
search_url = '/?do=search'\
    '&{query}'
# TODO '&startRecord={offset}'\
# TODO '&maximumRecords={limit}'\
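#
# Example: with the default base_url above, request() below turns the query
# "wiki syntax" into
#
#     http://localhost:8090/?do=search&id=wiki+syntax
#
# (urlencode({'id': query}) builds the "id=..." parameter, so spaces are
# encoded as "+").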


# do search-request
def request(query, params):
    params['url'] = base_url +\
        search_url.format(query=urlencode({'id': query}))

    return params
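

# The XPath expressions in response() below assume DokuWiki's default search
# page markup, roughly (a sketch; class names and nesting can vary between
# DokuWiki versions and templates):
#
#   <div class="search_quickresult">
#     <ul>
#       <li><a class="wikilink1" href="/doku.php?id=page" title="page">page</a></li>
#     </ul>
#   </div>
#   <dl class="search_results">
#     <dt><a class="wikilink1" href="/doku.php?id=page" title="page">page</a></dt>
#     <dd>... matching text snippet ...</dd>
#   </dl>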


# get response from search-request
def response(resp):
    results = []

    doc = fromstring(resp.text)

    # parse results
    # Quickhits
    for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
        try:
            res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
        except IndexError:
            # no wikilink1 anchor in this list item
            continue

        if not res_url:
            continue

        title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))

        # append result
        results.append({'title': title,
                        'content': "",
                        'url': base_url + res_url})

    # Search results
    for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
        try:
            if r.tag == "dt":
                res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
                title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
            elif r.tag == "dd":
                content = extract_text(eval_xpath(r, '.'))

                # append result
                results.append({'title': title,
                                'content': content,
                                'url': base_url + res_url})
        except (IndexError, NameError):
            # a <dt> without a wikilink1 anchor, or a <dd> before its <dt>
            continue

    # return results
    return results
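

if __name__ == '__main__':
    # Minimal manual smoke test (a sketch): searx itself calls request() to
    # build params['url'], performs the HTTP request, and passes the response
    # object to response(). This only prints the URL request() would fetch
    # and assumes the searx package is importable.
    print(request('wiki', {})['url'])
    # -> http://localhost:8090/?do=search&id=wiki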