  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. # lint: pylint
  3. """SolidTorrents
  4. """
  5. from datetime import datetime
  6. from urllib.parse import urlencode
  7. import random
  8. from lxml import html
  9. from searx.utils import (
  10. extract_text,
  11. eval_xpath,
  12. eval_xpath_getindex,
  13. eval_xpath_list,
  14. get_torrent_size,
  15. )
# Engine metadata shown on the searx "about" page for this engine.
about = {
    "website": 'https://www.solidtorrents.net/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# Engine belongs to the file-sharing category and supports paged results.
categories = ['files']
paging = True

# base_url can be overwritten by a list of URLs in the settings.yml
base_url = 'https://solidtorrents.net'
  28. def request(query, params):
  29. if isinstance(base_url, list):
  30. params['base_url'] = random.choice(base_url)
  31. else:
  32. params['base_url'] = base_url
  33. search_url = params['base_url'] + '/search?{query}'
  34. page = (params['pageno'] - 1) * 20
  35. query = urlencode({'q': query, 'page': page})
  36. params['url'] = search_url.format(query=query)
  37. return params
  38. def response(resp):
  39. results = []
  40. dom = html.fromstring(resp.text)
  41. for result in eval_xpath(dom, '//div[contains(@class, "search-result")]'):
  42. a = eval_xpath_getindex(result, './div/h5/a', 0, None)
  43. if a is None:
  44. continue
  45. title = extract_text(a)
  46. url = eval_xpath_getindex(a, '@href', 0, None)
  47. categ = eval_xpath(result, './div//a[contains(@class, "category")]')
  48. metadata = extract_text(categ)
  49. stats = eval_xpath_list(result, './div//div[contains(@class, "stats")]/div', min_len=5)
  50. n, u = extract_text(stats[1]).split()
  51. filesize = get_torrent_size(n, u)
  52. leech = extract_text(stats[2])
  53. seed = extract_text(stats[3])
  54. torrentfile = eval_xpath_getindex(result, './div//a[contains(@class, "dl-torrent")]/@href', 0, None)
  55. magnet = eval_xpath_getindex(result, './div//a[contains(@class, "dl-magnet")]/@href', 0, None)
  56. params = {
  57. 'seed': seed,
  58. 'leech': leech,
  59. 'title': title,
  60. 'url': resp.search_params['base_url'] + url,
  61. 'filesize': filesize,
  62. 'magnetlink': magnet,
  63. 'torrentfile': torrentfile,
  64. 'metadata': metadata,
  65. 'template': "torrent.html",
  66. }
  67. date_str = extract_text(stats[4])
  68. try:
  69. params['publishedDate'] = datetime.strptime(date_str, '%b %d, %Y')
  70. except ValueError:
  71. pass
  72. results.append(params)
  73. return results