bandcamp.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Bandcamp (Music)

@website     https://bandcamp.com/
@provide-api no
@results     HTML
@parse       url, title, content, publishedDate, iframe_src, thumbnail
"""

from urllib.parse import urlencode, urlparse, parse_qs

from dateutil.parser import parse as dateparse
from lxml import html

from searx.utils import (
    eval_xpath_getindex,
    eval_xpath_list,
    extract_text,
)

# about
about = {
    "website": 'https://bandcamp.com/',
    "wikidata_id": 'Q545966',
    "official_api_documentation": 'https://bandcamp.com/developer',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

categories = ['music']
paging = True

base_url = "https://bandcamp.com/"
search_string = 'search?{query}&page={page}'
iframe_src = "https://bandcamp.com/EmbeddedPlayer/{type}={result_id}/size=large/bgcol=000/linkcol=fff/artwork=small"
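
# Illustrative expansion of the template above (the id value is a placeholder,
# not a real Bandcamp item):
#   iframe_src.format(type='album', result_id='1234567890')
#   -> https://bandcamp.com/EmbeddedPlayer/album=1234567890/size=large/bgcol=000/linkcol=fff/artwork=small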


def request(query, params):
    '''pre-request callback

    params<dict>:
      method   : POST/GET
      headers  : {}
      data     : {}  # if method == POST
      url      : ''
      category : 'search category'
      pageno   : 1   # number of the requested page
    '''
    search_path = search_string.format(query=urlencode({'q': query}), page=params['pageno'])
    params['url'] = base_url + search_path
    return params
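
# Example (illustrative only, not part of the engine): for query "aphex twin"
# and params = {'pageno': 2}, request() sets
#   params['url'] == 'https://bandcamp.com/search?q=aphex+twin&page=2'
# since urlencode({'q': 'aphex twin'}) yields 'q=aphex+twin'.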


def response(resp):
    '''post-response callback

    resp: requests response object
    '''
    results = []
    dom = html.fromstring(resp.text)

    for result in eval_xpath_list(dom, '//li[contains(@class, "searchresult")]'):

        link = eval_xpath_getindex(result, './/div[@class="itemurl"]/a', 0, default=None)
        if link is None:
            continue

        title = result.xpath('.//div[@class="heading"]/a/text()')
        content = result.xpath('.//div[@class="subhead"]/text()')
        new_result = {
            "url": extract_text(link),
            "title": extract_text(title),
            "content": extract_text(content),
        }

        # relative XPath (.//) keeps the release date scoped to the current result
        date = eval_xpath_getindex(result, './/div[@class="released"]/text()', 0, default=None)
        if date:
            new_result["publishedDate"] = dateparse(date.replace("released ", ""))

        thumbnail = result.xpath('.//div[@class="art"]/img/@src')
        if thumbnail:
            new_result['img_src'] = thumbnail[0]

        # the numeric item id from the result URL selects the embedded-player target
        result_id = parse_qs(urlparse(link.get('href')).query)["search_item_id"][0]
        itemtype = extract_text(result.xpath('.//div[@class="itemtype"]')).lower()
        if itemtype == "album":
            new_result["iframe_src"] = iframe_src.format(type='album', result_id=result_id)
        elif itemtype == "track":
            new_result["iframe_src"] = iframe_src.format(type='track', result_id=result_id)

        results.append(new_result)

    return results
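

# --- Minimal offline smoke test (a sketch, not part of the engine) ---
# Assumes a searx/SearXNG checkout on PYTHONPATH so the imports above resolve.
# The HTML below only mirrors the XPath expressions used in response(); real
# Bandcamp markup is richer and may change at any time.
if __name__ == "__main__":
    from types import SimpleNamespace

    SAMPLE_HTML = """
    <ul>
      <li class="searchresult">
        <div class="itemtype">album</div>
        <div class="art"><img src="https://example.invalid/art.jpg"></div>
        <div class="heading"><a href="#">Example Album</a></div>
        <div class="subhead">by Example Artist</div>
        <div class="itemurl">
          <a href="https://example.bandcamp.com/album/example?search_item_id=12345">
            example.bandcamp.com/album/example
          </a>
        </div>
        <div class="released">released 01 January 2020</div>
      </li>
    </ul>
    """

    # stand-in for the HTTP response object: only .text is read by response()
    fake_resp = SimpleNamespace(text=SAMPLE_HTML)
    for r in response(fake_resp):
        print(r)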