  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. # lint: pylint
  3. """Goodreads (books)
  4. """
  5. from urllib.parse import urlencode
  6. from lxml import html
  7. from searx.utils import extract_text, eval_xpath, eval_xpath_list
  8. about = {
  9. 'website': 'https://www.goodreads.com',
  10. 'wikidata_id': 'Q2359213',
  11. 'official_api_documentation': None,
  12. 'use_official_api': False,
  13. 'require_api_key': False,
  14. 'results': 'HTML',
  15. }
  16. categories = []
  17. paging = True
  18. base_url = "https://www.goodreads.com"
  19. results_xpath = "//table//tr"
  20. thumbnail_xpath = ".//img[contains(@class, 'bookCover')]/@src"
  21. url_xpath = ".//a[contains(@class, 'bookTitle')]/@href"
  22. title_xpath = ".//a[contains(@class, 'bookTitle')]"
  23. author_xpath = ".//a[contains(@class, 'authorName')]"
  24. info_text_xpath = ".//span[contains(@class, 'uitext')]"
  25. def request(query, params):
  26. args = {
  27. 'q': query,
  28. 'page': params['pageno'],
  29. }
  30. params['url'] = f"{base_url}/search?{urlencode(args)}"
  31. return params
  32. def response(resp):
  33. results = []
  34. dom = html.fromstring(resp.text)
  35. for result in eval_xpath_list(dom, results_xpath):
  36. results.append(
  37. {
  38. 'url': base_url + extract_text(eval_xpath(result, url_xpath)),
  39. 'title': extract_text(eval_xpath(result, title_xpath)),
  40. 'img_src': extract_text(eval_xpath(result, thumbnail_xpath)),
  41. 'content': extract_text(eval_xpath(result, info_text_xpath)),
  42. 'metadata': extract_text(eval_xpath(result, author_xpath)),
  43. }
  44. )
  45. return results