  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. # lint: pylint
  3. """SolidTorrents
  4. """
  5. from datetime import datetime
  6. from urllib.parse import urlencode
  7. import random
  8. from lxml import html
  9. from searx.utils import (
  10. extract_text,
  11. eval_xpath,
  12. eval_xpath_getindex,
  13. eval_xpath_list,
  14. get_torrent_size,
  15. )
  16. about = {
  17. "website": 'https://www.solidtorrents.to/',
  18. "wikidata_id": None,
  19. "official_api_documentation": None,
  20. "use_official_api": False,
  21. "require_api_key": False,
  22. "results": 'HTML',
  23. }
  24. categories = ['files']
  25. paging = True
  26. # base_url can be overwritten by a list of URLs in the settings.yml
  27. base_url = 'https://solidtorrents.to'
  28. def request(query, params):
  29. if isinstance(base_url, list):
  30. params['base_url'] = random.choice(base_url)
  31. else:
  32. params['base_url'] = base_url
  33. search_url = params['base_url'] + '/search?{query}'
  34. query = urlencode({'q': query, 'page': params['pageno']})
  35. params['url'] = search_url.format(query=query)
  36. return params
  37. def response(resp):
  38. results = []
  39. dom = html.fromstring(resp.text)
  40. for result in eval_xpath(dom, '//li[contains(@class, "search-result")]'):
  41. torrentfile = eval_xpath_getindex(result, './/a[contains(@class, "dl-torrent")]/@href', 0, None)
  42. magnet = eval_xpath_getindex(result, './/a[contains(@class, "dl-magnet")]/@href', 0, None)
  43. if torrentfile is None or magnet is None:
  44. continue # ignore anime results that which aren't actually torrents
  45. title = eval_xpath_getindex(result, './/h5[contains(@class, "title")]', 0, None)
  46. url = eval_xpath_getindex(result, './/h5[contains(@class, "title")]/a/@href', 0, None)
  47. categ = eval_xpath(result, './/a[contains(@class, "category")]')
  48. stats = eval_xpath_list(result, './/div[contains(@class, "stats")]/div', min_len=5)
  49. params = {
  50. 'seed': extract_text(stats[3]),
  51. 'leech': extract_text(stats[2]),
  52. 'title': extract_text(title),
  53. 'url': resp.search_params['base_url'] + url,
  54. 'filesize': get_torrent_size(*extract_text(stats[1]).split()),
  55. 'magnetlink': magnet,
  56. 'torrentfile': torrentfile,
  57. 'metadata': extract_text(categ),
  58. 'template': "torrent.html",
  59. }
  60. try:
  61. params['publishedDate'] = datetime.strptime(extract_text(stats[4]), '%b %d, %Y')
  62. except ValueError:
  63. pass
  64. results.append(params)
  65. return results