# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""SolidTorrents

Engine scraping torrent results from the solidtorrents.net HTML pages
(no official API is used; see ``about['results']``).
"""
from datetime import datetime
from urllib.parse import urlencode
import random

from lxml import html

from searx.utils import (
    extract_text,
    eval_xpath,
    eval_xpath_getindex,
    eval_xpath_list,
    get_torrent_size,
)
# Engine metadata shown in the searx preferences / about section.
about = {
    "website": 'https://www.solidtorrents.net/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine-dependent config
categories = ['files']
paging = True

# ``base_url`` may be configured (in settings) as a single URL string or a
# list of mirror URLs; a mirror is picked at random for each request.
base_url = ''

# Mirror chosen for the current request: written by request(), read by
# response() to turn relative result links into absolute URLs.
base_url_rand = ''
  28. def request(query, params):
  29. global base_url_rand # pylint: disable=global-statement
  30. if isinstance(base_url, list):
  31. base_url_rand = random.choice(base_url)
  32. else:
  33. base_url_rand = base_url
  34. search_url = base_url_rand + '/search?{query}'
  35. page = (params['pageno'] - 1) * 20
  36. query = urlencode({'q': query, 'page': page})
  37. params['url'] = search_url.format(query=query)
  38. return params
  39. def response(resp):
  40. results = []
  41. dom = html.fromstring(resp.text)
  42. for result in eval_xpath(dom, '//div[contains(@class, "search-result")]'):
  43. a = eval_xpath_getindex(result, './div/h5/a', 0, None)
  44. if a is None:
  45. continue
  46. title = extract_text(a)
  47. url = eval_xpath_getindex(a, '@href', 0, None)
  48. categ = eval_xpath(result, './div//a[contains(@class, "category")]')
  49. metadata = extract_text(categ)
  50. stats = eval_xpath_list(result, './div//div[contains(@class, "stats")]/div', min_len=5)
  51. n, u = extract_text(stats[1]).split()
  52. filesize = get_torrent_size(n, u)
  53. leech = extract_text(stats[2])
  54. seed = extract_text(stats[3])
  55. torrentfile = eval_xpath_getindex(result, './div//a[contains(@class, "dl-torrent")]/@href', 0, None)
  56. magnet = eval_xpath_getindex(result, './div//a[contains(@class, "dl-magnet")]/@href', 0, None)
  57. params = {
  58. 'seed': seed,
  59. 'leech': leech,
  60. 'title': title,
  61. 'url': base_url_rand + url,
  62. 'filesize': filesize,
  63. 'magnetlink': magnet,
  64. 'torrentfile': torrentfile,
  65. 'metadata': metadata,
  66. 'template': "torrent.html",
  67. }
  68. date_str = extract_text(stats[4])
  69. try:
  70. params['publishedDate'] = datetime.strptime(date_str, '%b %d, %Y')
  71. except ValueError:
  72. pass
  73. results.append(params)
  74. return results