lib_rs.py
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""lib.rs (packages)"""

from urllib.parse import quote_plus

from lxml import html

from searx.utils import eval_xpath, eval_xpath_list, extract_text

about = {
    'website': 'https://lib.rs',
    'wikidata_id': 'Q113486010',
    'use_official_api': False,
    'require_api_key': False,
    'results': "HTML",
}

categories = ["it", "packages"]

base_url = 'https://lib.rs'

# XPath selectors for the lib.rs search results page: each hit is an <a>
# element wrapping a header block (class "h") and a metadata block
# (class "meta") with version, download count, and keyword tags.
results_xpath = '/html/body/main/div/ol/li/a'
url_xpath = './@href'
title_xpath = './div[@class="h"]/h4'
content_xpath = './div[@class="h"]/p'
version_xpath = './div[@class="meta"]/span[contains(@class, "version")]'
download_count_xpath = './div[@class="meta"]/span[@class="downloads"]'
tags_xpath = './div[@class="meta"]/span[contains(@class, "k")]/text()'


def request(query, params):
    # Build the search URL, e.g. "rust http" -> https://lib.rs/search?q=rust+http
    params['url'] = f"{base_url}/search?q={quote_plus(query)}"
    return params


def response(resp):
    results = []

    doc = html.fromstring(resp.text)

    for result in eval_xpath_list(doc, results_xpath):
        package_name = extract_text(eval_xpath(result, title_xpath))
        results.append(
            {
                'template': 'packages.html',
                'title': package_name,
                'url': base_url + extract_text(eval_xpath(result, url_xpath)),  # type: ignore
                'content': extract_text(eval_xpath(result, content_xpath)),
                'package_name': package_name,
                'version': extract_text(eval_xpath(result, version_xpath)),
                'popularity': extract_text(eval_xpath(result, download_count_xpath)),
                'tags': eval_xpath_list(result, tags_xpath),
            }
        )

    return results
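

if __name__ == '__main__':
    # A minimal offline sketch, NOT part of the SearXNG engine contract:
    # it feeds response() a hand-written HTML fragment modeled on the
    # lib.rs search markup targeted by the selectors above. The class
    # _FakeResponse, the sample crate data, and the version/download
    # numbers are assumptions for illustration only; running this still
    # requires the searx package to be importable.
    class _FakeResponse:
        """Stand-in for the HTTP response object SearXNG hands to response()."""

        text = (
            '<html><body><main><div><ol><li>'
            '<a href="/crates/serde">'
            '<div class="h"><h4>serde</h4><p>A serialization framework</p></div>'
            '<div class="meta"><span class="version stable">1.0.203</span>'
            '<span class="downloads">52M</span>'
            '<span class="k">serialization</span></div>'
            '</a></li></ol></div></main></body></html>'
        )

    for item in response(_FakeResponse()):
        # Expected output: serde 1.0.203 52M https://lib.rs/crates/serde
        print(item['package_name'], item['version'], item['popularity'], item['url'])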