tokyotoshokan.py

  1. """
  2. Tokyo Toshokan (A BitTorrent Library for Japanese Media)
  3. @website https://www.tokyotosho.info/
  4. @provide-api no
  5. @using-api no
  6. @results HTML
  7. @stable no (HTML can change)
  8. @parse url, title, publishedDate, seed, leech,
  9. filesize, magnetlink, content
  10. """

import re
from urllib.parse import urlencode
from lxml import html
from searx.engines.xpath import extract_text
from datetime import datetime
from searx.engines.nyaa import int_or_zero, get_filesize_mul
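
# Helpers shared with the nyaa engine: int_or_zero() is a best-effort
# int() conversion that falls back to 0, and get_filesize_mul() maps a
# size suffix such as 'GB' to its byte multiplier (behavior inferred
# from how they are used below).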

# engine dependent config
categories = ['files', 'videos', 'music']
paging = True

# search-url
base_url = 'https://www.tokyotosho.info/'
search_url = base_url + 'search.php?{query}'


# do search-request
def request(query, params):
    query = urlencode({'page': params['pageno'],
                       'terms': query})
    params['url'] = search_url.format(query=query)
    return params
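
# For example, request('madoka', {'pageno': 1}) sets params['url'] to
# 'https://www.tokyotosho.info/search.php?page=1&terms=madoka'
# (illustrative values; in practice searx supplies the full params dict).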


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)
    rows = dom.xpath('//table[@class="listing"]//tr[contains(@class, "category_0")]')

    # check if there are no results, or if the page layout has changed
    # so that we cannot parse it; currently there are two rows for each
    # result, so the total count must be even
    if len(rows) == 0 or len(rows) % 2 != 0:
        return []

    # regular expression for parsing torrent size strings
    size_re = re.compile(r'Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
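    # e.g. size_re.match('Size: 1.228GB').groups() == ('1.228', 'GB')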

    # process the results, two rows at a time
    for i in range(0, len(rows), 2):
        # parse the first row
        name_row = rows[i]

        links = name_row.xpath('./td[@class="desc-top"]/a')
        params = {
            'template': 'torrent.html',
            'url': links[-1].attrib.get('href'),
            'title': extract_text(links[-1])
        }
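        # 'torrent.html' refers to searx's torrent result template, which
        # renders the magnetlink/filesize/seed/leech fields collected below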
        # I have not yet seen any torrents without magnet links, but
        # it's better to be prepared to stumble upon one some day
        if len(links) == 2:
            magnet = links[0].attrib.get('href')
            if magnet.startswith('magnet'):
                # okay, we have a valid magnet link, let's add it to the result
                params['magnetlink'] = magnet
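        # (a BitTorrent magnet URI starts with 'magnet:?xt=urn:btih:...',
        # hence the startswith() check above)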

        # no more info in the first row, start parsing the second one
        info_row = rows[i + 1]
        desc = extract_text(info_row.xpath('./td[@class="desc-bot"]')[0])
        for item in desc.split('|'):
            item = item.strip()
            if item.startswith('Size:'):
                try:
                    # groups() is a tuple like ('1.228', 'GB')
                    groups = size_re.match(item).groups()
                    multiplier = get_filesize_mul(groups[1])
                    params['filesize'] = int(multiplier * float(groups[0]))
                except Exception:
                    pass
            elif item.startswith('Date:'):
                try:
                    # e.g. 'Date: 2016-02-21 21:44 UTC'
                    date = datetime.strptime(item, 'Date: %Y-%m-%d %H:%M UTC')
                    params['publishedDate'] = date
                except Exception:
                    pass
            elif item.startswith('Comment:'):
                params['content'] = item

        stats = info_row.xpath('./td[@class="stats"]/span')
        # has the layout not changed yet?
        if len(stats) == 3:
            params['seed'] = int_or_zero(extract_text(stats[0]))
            params['leech'] = int_or_zero(extract_text(stats[1]))

        results.append(params)

    return results
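
# Illustrative usage (not part of the engine API): searx itself builds the
# params dict, fetches params['url'], and hands the HTTP response to
# response(). A manual run would look roughly like this, assuming the
# `requests` package is available:
#
#   import requests
#   p = request('madoka', {'pageno': 1})
#   for r in response(requests.get(p['url'])):
#       print(r['title'], r.get('seed'), r.get('magnetlink'))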