# digg.py
  1. """
  2. Digg (News, Social media)
  3. @website https://digg.com
  4. @provide-api no
  5. @using-api no
  6. @results HTML (using search portal)
  7. @stable no (HTML can change)
  8. @parse url, title, content, publishedDate, thumbnail
  9. """
  10. # pylint: disable=missing-function-docstring
  11. from json import loads
  12. from urllib.parse import urlencode
  13. from datetime import datetime
  14. from lxml import html
  15. # engine dependent config
  16. categories = ['news', 'social media']
  17. paging = True
  18. base_url = 'https://digg.com'
  19. # search-url
  20. search_url = base_url + (
  21. '/api/search/'
  22. '?{query}'
  23. '&from={position}'
  24. '&size=20'
  25. '&format=html'
  26. )
  27. def request(query, params):
  28. offset = (params['pageno'] - 1) * 20
  29. params['url'] = search_url.format(
  30. query = urlencode({'q': query}),
  31. position = offset,
  32. )
  33. return params
  34. def response(resp):
  35. results = []
  36. # parse results
  37. for result in loads(resp.text)['mapped']:
  38. # strip html tags and superfluous quotation marks from content
  39. content = html.document_fromstring(
  40. result['excerpt']
  41. ).text_content()
  42. # 'created': {'ISO': '2020-10-16T14:09:55Z', ...}
  43. published = datetime.strptime(
  44. result['created']['ISO'], '%Y-%m-%dT%H:%M:%SZ'
  45. )
  46. results.append({
  47. 'url': result['url'],
  48. 'title': result['title'],
  49. 'content' : content,
  50. 'template': 'videos.html',
  51. 'publishedDate': published,
  52. 'thumbnail': result['images']['thumbImage'],
  53. })
  54. return results