ollama.py 1.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """Ollama model search engine for searxng"""
  3. from urllib.parse import urlencode
  4. from datetime import datetime
  5. from lxml import html
  6. from searx.utils import eval_xpath_list, eval_xpath_getindex, eval_xpath, extract_text
  7. from searx.result_types import EngineResults
# Engine metadata shown in searxng's engine overview / preferences page.
about = {
    "website": "https://ollama.com",
    "wikidata_id": "Q124636097",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

# Result categories this engine contributes to.
categories = ["it", "repos"]

base_url = "https://ollama.com"

# XPath selectors for scraping ollama.com's HTML search result page.
# NOTE(review): these depend on ollama.com's markup (test attributes and
# utility-class strings) and will break silently if the site changes.
results_xpath = '//li[@x-test-model]'
title_xpath = './/span[@x-test-search-response-title]/text()'
content_xpath = './/p[@class="max-w-lg break-words text-neutral-800 text-md"]/text()'
url_xpath = './a/@href'
publish_date_xpath = './/span[contains(@class, "flex items-center")]/@title'
  22. def request(query, params):
  23. query_params = {"q": query}
  24. params['url'] = f"{base_url}/search?{urlencode(query_params)}"
  25. return params
  26. def response(resp) -> EngineResults:
  27. res = EngineResults()
  28. dom = html.fromstring(resp.text)
  29. for item in eval_xpath_list(dom, results_xpath):
  30. published_date = None
  31. try:
  32. published_date = datetime.strptime(
  33. extract_text(eval_xpath(item, publish_date_xpath)), "%b %d, %Y %I:%M %p %Z"
  34. )
  35. except ValueError:
  36. pass
  37. res.add(
  38. res.types.MainResult(
  39. title=extract_text(eval_xpath(item, title_xpath)),
  40. content=extract_text(eval_xpath(item, content_xpath)),
  41. url=f"{base_url}{eval_xpath_getindex(item, url_xpath, 0)}",
  42. publishedDate=published_date,
  43. )
  44. )
  45. return res