doku.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Doku Wiki
"""

from urllib.parse import urlencode
from lxml.html import fromstring
from searx.utils import extract_text, eval_xpath

# about
about = {
    "website": 'https://www.dokuwiki.org/',
    "wikidata_id": 'Q851864',
    "official_api_documentation": 'https://www.dokuwiki.org/devel:xmlrpc',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general']  # TODO , 'images', 'music', 'videos', 'files'
paging = False
number_of_results = 5

# search-url
# Doku is OpenSearch compatible
base_url = 'http://localhost:8090'
search_url = '/?do=search&{query}'
# TODO '&startRecord={offset}'\
# TODO '&maximumRecords={limit}'\


# do search-request
def request(query, params):
    params['url'] = base_url + search_url.format(query=urlencode({'id': query}))
    return params
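
# Example (with the default base_url above): request('wiki syntax', {}) sets
# params['url'] to 'http://localhost:8090/?do=search&id=wiki+syntax', since
# urlencode() percent-encodes the query and turns spaces into '+'.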


# get response from search-request
def response(resp):
    results = []
    doc = fromstring(resp.text)

    # parse results
    # Quickhits
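    # Expected quickhit markup (assumption: stock DokuWiki templates; custom
    # templates may render the result list differently):
    #   <div class="search_quickresult">
    #     <ul>
    #       <li><a class="wikilink1" href="/doku.php?id=..." title="...">...</a></li>
    #     </ul>
    #   </div>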
    for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
        try:
            res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
        except IndexError:
            # list item without a page link
            continue

        if not res_url:
            continue

        title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))

        # append result
        results.append({'title': title, 'content': '', 'url': base_url + res_url})

    # Search results
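    # Expected markup (assumption, matching stock DokuWiki search output):
    # each hit is a <dt> holding the page link, followed by a <dd> snippet:
    #   <dl class="search_results">
    #     <dt><a class="wikilink1" href="..." title="...">...</a></dt>
    #     <dd>...snippet text...</dd>
    #   </dl>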
    res_url = None
    title = None

    for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
        try:
            if r.tag == "dt":
                res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
                title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
            elif r.tag == "dd" and res_url:
                content = extract_text(eval_xpath(r, '.'))

                # append result
                results.append({'title': title, 'content': content, 'url': base_url + res_url})
        except IndexError:
            # <dt> without a wikilink1 anchor
            continue

    # return results
    return results
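

# Minimal manual smoke test: a sketch, assuming a DokuWiki instance is actually
# reachable at base_url and that the `requests` package is installed. In
# production, searx itself drives request()/response(); nothing below is part
# of the engine API.
if __name__ == '__main__':
    import requests

    _params = request('wiki', {})
    _resp = requests.get(_params['url'], timeout=10)
    for _result in response(_resp):
        print(_result['url'], '-', _result['title'])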