solidtorrents.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""SolidTorrents
"""
from datetime import datetime
from urllib.parse import urlencode
import random
from lxml import html
from searx.utils import (
    extract_text,
    eval_xpath,
    eval_xpath_getindex,
    eval_xpath_list,
    get_torrent_size,
)
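
# Standard searx/SearXNG engine attributes: `about` holds the metadata shown in
# the engine documentation, while `categories` and `paging` tell the core under
# which tab the engine fires and that it supports pagination.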
about = {
    "website": 'https://www.solidtorrents.net/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

categories = ['files']
paging = True

# base_url can be overwritten by a list of URLs in the settings.yml
base_url = 'https://solidtorrents.net'
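
# A hypothetical settings.yml entry (shortcut and mirror URLs below are
# illustrative, not taken from the default configuration):
#
#   - name: solidtorrents
#     engine: solidtorrents
#     shortcut: solid
#     base_url:
#       - https://solidtorrents.net
#       - https://solidtorrents.example.org
#
# When base_url is a list, request() picks one mirror at random per query.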


def request(query, params):
    if isinstance(base_url, list):
        params['base_url'] = random.choice(base_url)
    else:
        params['base_url'] = base_url
    search_url = params['base_url'] + '/search?{query}'

    # the page parameter advances in steps of 20 results (first page is 0)
    page = (params['pageno'] - 1) * 20
    query = urlencode({'q': query, 'page': page})
    params['url'] = search_url.format(query=query)

    return params
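

# Each result on the HTML page is an <li class="search-result"> element; the
# XPath expressions below pull the title, links and the stats column out of it.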
def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    for result in eval_xpath(dom, '//li[contains(@class, "search-result")]'):
        torrentfile = eval_xpath_getindex(result, './/a[contains(@class, "dl-torrent")]/@href', 0, None)
        magnet = eval_xpath_getindex(result, './/a[contains(@class, "dl-magnet")]/@href', 0, None)
        if torrentfile is None or magnet is None:
            continue  # ignore anime results which aren't actually torrents
        title = eval_xpath_getindex(result, './/h5[contains(@class, "title")]', 0, None)
        url = eval_xpath_getindex(result, './/h5[contains(@class, "title")]/a/@href', 0, None)
        categ = eval_xpath(result, './/a[contains(@class, "category")]')
        stats = eval_xpath_list(result, './/div[contains(@class, "stats")]/div', min_len=5)

        params = {
            'seed': extract_text(stats[3]),
            'leech': extract_text(stats[2]),
            'title': extract_text(title),
            'url': resp.search_params['base_url'] + url,
            'filesize': get_torrent_size(*extract_text(stats[1]).split()),
            'magnetlink': magnet,
            'torrentfile': torrentfile,
            'metadata': extract_text(categ),
            'template': "torrent.html",
        }

        # the upload date is rendered like 'Jan 02, 2006'; skip it if parsing fails
        try:
            params['publishedDate'] = datetime.strptime(extract_text(stats[4]), '%b %d, %Y')
        except ValueError:
            pass

        results.append(params)

    return results
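
# A minimal usage sketch (assumes a working searx/SearXNG checkout so that
# searx.utils is importable; the names are the ones defined above):
#
#     from searx.engines import solidtorrents
#     params = {'pageno': 2}
#     solidtorrents.request('ubuntu iso', params)
#     print(params['url'])
#     # -> https://solidtorrents.net/search?q=ubuntu+iso&page=20
#
# response() is normally called by the SearXNG core with the HTTP response of
# that URL and returns a list of result dicts rendered via the torrent.html template.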