# SPDX-License-Identifier: AGPL-3.0-or-later
"""ChinaSo: A search engine from ChinaSo."""

from urllib.parse import urlencode
from datetime import datetime

from searx.exceptions import SearxEngineAPIException
from searx.utils import html_to_text

about = {
    "website": "https://www.chinaso.com/",
    "wikidata_id": "Q10846064",
    "use_official_api": False,
    "require_api_key": False,
    "results": "JSON",
}

paging = True
time_range_support = True
results_per_page = 10
categories = []

chinaso_category = 'news'
"""ChinaSo supports news, videos and images search.

- ``news``: search for news
- ``videos``: search for videos
- ``images``: search for images
"""

time_range_dict = {'day': '24h', 'week': '1w', 'month': '1m', 'year': '1y'}

base_url = "https://www.chinaso.com"
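
# Illustrative settings.yml entry (an assumption about deployment, not part of this
# module): in SearXNG, keys in an engine's settings.yml entry override module
# attributes, so ``chinaso_category`` can be chosen per engine instance.  A sketch:
#
#   - name: chinaso news
#     engine: chinaso
#     shortcut: chinaso
#     categories: [news]
#     chinaso_category: news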


def init(_):
    if chinaso_category not in ('news', 'videos', 'images'):
        raise SearxEngineAPIException(f"Unsupported category: {chinaso_category}")


def request(query, params):
    query_params = {"q": query}

    if time_range_dict.get(params['time_range']):
        query_params["stime"] = time_range_dict[params['time_range']]
        query_params["etime"] = 'now'

    category_config = {
        'news': {
            'endpoint': '/v5/general/v1/web/search',
            'params': {'pn': params["pageno"], 'ps': results_per_page},
        },
        'images': {
            'endpoint': '/v5/general/v1/search/image',
            'params': {'start_index': (params["pageno"] - 1) * results_per_page, 'rn': results_per_page},
        },
        'videos': {
            'endpoint': '/v5/general/v1/search/video',
            'params': {'start_index': (params["pageno"] - 1) * results_per_page, 'rn': results_per_page},
        },
    }
    query_params.update(category_config[chinaso_category]['params'])

    params["url"] = f"{base_url}{category_config[chinaso_category]['endpoint']}?{urlencode(query_params)}"

    return params
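
# Example of the URL ``request`` builds for the default ``news`` category
# (query and page values are illustrative):
#
#   request('hello', {'time_range': None, 'pageno': 1})
#   -> params['url'] == 'https://www.chinaso.com/v5/general/v1/web/search?q=hello&pn=1&ps=10'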


def response(resp):
    try:
        data = resp.json()
    except Exception as e:
        raise SearxEngineAPIException(f"Invalid response: {e}") from e

    parsers = {'news': parse_news, 'images': parse_images, 'videos': parse_videos}

    return parsers[chinaso_category](data)


def parse_news(data):
    results = []
    if not data.get("data", {}).get("data"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["data"]:
        published_date = None
        if entry.get("timestamp"):
            try:
                published_date = datetime.fromtimestamp(int(entry["timestamp"]))
            except (ValueError, TypeError):
                pass

        results.append(
            {
                'title': html_to_text(entry["title"]),
                'url': entry["url"],
                'content': html_to_text(entry["snippet"]),
                'publishedDate': published_date,
            }
        )
    return results
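
# Simplified shape of the news payload, inferred from the fields read in
# ``parse_news`` above (the API may return additional keys not used here):
#
#   {"data": {"data": [{"title": ..., "url": ..., "snippet": ..., "timestamp": ...}]}}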


def parse_images(data):
    results = []
    if not data.get("data", {}).get("arrRes"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["arrRes"]:
        results.append(
            {
                'url': entry["web_url"],
                'title': html_to_text(entry["title"]),
                'content': html_to_text(entry["ImageInfo"]),
                'template': 'images.html',
                'img_src': entry["url"].replace("http://", "https://"),
                'thumbnail_src': entry["largeimage"].replace("http://", "https://"),
            }
        )
    return results


def parse_videos(data):
    results = []
    if not data.get("data", {}).get("arrRes"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["arrRes"]:
        published_date = None
        if entry.get("VideoPubDate"):
            try:
                published_date = datetime.fromtimestamp(int(entry["VideoPubDate"]))
            except (ValueError, TypeError):
                pass

        results.append(
            {
                'url': entry["url"],
                'title': html_to_text(entry["raw_title"]),
                'template': 'videos.html',
                'publishedDate': published_date,
                'thumbnail': entry["image_src"].replace("http://", "https://"),
            }
        )
    return results