# digg.py
"""
 Digg (News, Social media)

 @website     https://digg.com/
 @provide-api no

 @using-api   no
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content, publishedDate, thumbnail
"""
  10. import random
  11. import string
  12. from json import loads
  13. from urllib.parse import urlencode
  14. from datetime import datetime
  15. # engine dependent config
  16. categories = ['news', 'social media']
  17. paging = True
  18. # search-url
  19. base_url = 'https://digg.com/'
  20. search_url = base_url + 'api/search/?{query}&from={position}&size=20&format=html'
  21. # specific xpath variables
  22. results_xpath = '//article'
  23. link_xpath = './/small[@class="time"]//a'
  24. title_xpath = './/h2//a//text()'
  25. content_xpath = './/p//text()'
  26. pubdate_xpath = './/time'
  27. digg_cookie_chars = string.ascii_uppercase + string.ascii_lowercase +\
  28. string.digits + "+_"
  29. # do search-request
  30. def request(query, params):
  31. offset = (params['pageno'] - 1) * 20
  32. params['url'] = search_url.format(position=offset,
  33. query=urlencode({'q': query}))
  34. params['cookies']['frontend.auid'] = ''.join(random.choice(
  35. digg_cookie_chars) for _ in range(22))
  36. return params
  37. # get response from search-request
  38. def response(resp):
  39. results = []
  40. search_result = loads(resp.text)
  41. # parse results
  42. for result in search_result['mapped']:
  43. published = datetime.strptime(result['created']['ISO'], "%Y-%m-%d %H:%M:%S")
  44. # append result
  45. results.append({'url': result['url'],
  46. 'title': result['title'],
  47. 'content': result['excerpt'],
  48. 'template': 'videos.html',
  49. 'publishedDate': published,
  50. 'thumbnail': result['images']['thumbImage']})
  51. # return results
  52. return results