# hackernews.py
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. # lint: pylint
  3. """Hackernews
  4. """
  5. from datetime import datetime
  6. from urllib.parse import urlencode
  7. from dateutil.relativedelta import relativedelta
  8. from flask_babel import gettext
  9. # Engine metadata
  10. about = {
  11. "website": "https://news.ycombinator.com/",
  12. "wikidata_id": "Q686797",
  13. "official_api_documentation": "https://hn.algolia.com/api",
  14. "use_official_api": True,
  15. "require_api_key": False,
  16. "results": "JSON",
  17. }
  18. # Engine configuration
  19. paging = True
  20. time_range_support = True
  21. categories = ["it"]
  22. results_per_page = 30
  23. # Search URL
  24. base_url = "https://hn.algolia.com/api/v1"
  25. def request(query, params):
  26. search_type = 'search'
  27. if not query:
  28. # if search query is empty show results from HN's front page
  29. search_type = 'search_by_date'
  30. query_params = {
  31. "tags": "front_page",
  32. "page": (params["pageno"] - 1),
  33. }
  34. else:
  35. query_params = {
  36. "query": query,
  37. "page": (params["pageno"] - 1),
  38. "hitsPerPage": results_per_page,
  39. "minWordSizefor1Typo": 4,
  40. "minWordSizefor2Typos": 8,
  41. "advancedSyntax": "true",
  42. "ignorePlurals": "false",
  43. "minProximity": 7,
  44. "numericFilters": '[]',
  45. "tagFilters": '["story",[]]',
  46. "typoTolerance": "true",
  47. "queryType": "prefixLast",
  48. "restrictSearchableAttributes": '["title","comment_text","url","story_text","author"]',
  49. "getRankingInfo": "true",
  50. }
  51. if params['time_range']:
  52. search_type = 'search_by_date'
  53. timestamp = (datetime.now() - relativedelta(**{f"{params['time_range']}s": 1})).timestamp()
  54. query_params["numericFilters"] = f"created_at_i>{timestamp}"
  55. params["url"] = f"{base_url}/{search_type}?{urlencode(query_params)}"
  56. return params
  57. def response(resp):
  58. results = []
  59. data = resp.json()
  60. for hit in data["hits"]:
  61. object_id = hit["objectID"]
  62. points = hit.get("points") or 0
  63. num_comments = hit.get("num_comments") or 0
  64. metadata = ""
  65. if points != 0 or num_comments != 0:
  66. metadata = f"{gettext('points')}: {points}" f" | {gettext('comments')}: {num_comments}"
  67. results.append(
  68. {
  69. "title": hit.get("title") or f"{gettext('author')}: {hit['author']}",
  70. "url": f"https://news.ycombinator.com/item?id={object_id}",
  71. "content": hit.get("url") or hit.get("comment_text") or hit.get("story_text") or "",
  72. "metadata": metadata,
  73. "author": hit["author"],
  74. "publishedDate": datetime.utcfromtimestamp(hit["created_at_i"]),
  75. }
  76. )
  77. return results