# SPDX-License-Identifier: AGPL-3.0-or-later
"""Baidu_

.. _Baidu: https://www.baidu.com
"""

# There exists a https://github.com/ohblue/baidu-serp-api/
# but we don't use it here (maybe we can learn from it).

from urllib.parse import urlencode
from datetime import datetime
import time
import json

from searx.exceptions import SearxEngineAPIException
from searx.utils import html_to_text

about = {
    "website": "https://www.baidu.com",
    "wikidata_id": "Q14772",
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": "JSON",
    "language": "zh",
}
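
# engine dependent config: `baidu_category` selects which Baidu service is
# queried: 'general' (web search), 'images' (image.baidu.com) or 'it'
# (kaifa.baidu.com developer search).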
paging = True
categories = []
results_per_page = 10
baidu_category = 'general'

time_range_support = True
time_range_dict = {"day": 86400, "week": 604800, "month": 2592000, "year": 31536000}
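

# `init` runs once when the engine is set up and rejects unsupported
# `baidu_category` values early, instead of failing on the first query.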
def init(_):
    if baidu_category not in ('general', 'images', 'it'):
        raise SearxEngineAPIException(f"Unsupported category: {baidu_category}")
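

# `request` builds the outgoing URL; each category has its own endpoint and
# parameter names, so they are kept together in one lookup table.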
def request(query, params):
    page_num = params["pageno"]

    category_config = {
        'general': {
            'endpoint': 'https://www.baidu.com/s',
            'params': {
                "wd": query,
                "rn": results_per_page,
                "pn": (page_num - 1) * results_per_page,
                "tn": "json",
            },
        },
        'images': {
            'endpoint': 'https://image.baidu.com/search/acjson',
            'params': {
                "word": query,
                "rn": results_per_page,
                "pn": (page_num - 1) * results_per_page,
                "tn": "resultjson_com",
            },
        },
        'it': {
            'endpoint': 'https://kaifa.baidu.com/rest/v1/search',
            'params': {
                "wd": query,
                "pageSize": results_per_page,
                "pageNum": page_num,
                "paramList": f"page_num={page_num},page_size={results_per_page}",
                "position": 0,
            },
        },
    }

    query_params = category_config[baidu_category]['params']
    query_url = category_config[baidu_category]['endpoint']
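
    # Time filtering: `general` takes an epoch interval via the `gpc`
    # parameter (stf=<start>,<end>), `it` appends a timestamp_range to
    # paramList; the images endpoint has no time filter.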
    if params.get("time_range") in time_range_dict:
        now = int(time.time())
        past = now - time_range_dict[params["time_range"]]

        if baidu_category == 'general':
            query_params["gpc"] = f"stf={past},{now}|stftype=1"

        if baidu_category == 'it':
            query_params["paramList"] += f",timestamp_range={past}-{now}"

    params["url"] = f"{query_url}?{urlencode(query_params)}"
    return params
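

# For example, page 2 of a `general` search for "hello" becomes:
#   https://www.baidu.com/s?wd=hello&rn=10&pn=10&tn=json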


def response(resp):
    text = resp.text
    if baidu_category == 'images':
        # Baidu's JSON encoder wrongly escapes the characters / and ' as \/ and \'
        text = text.replace(r"\/", "/").replace(r"\'", "'")

    data = json.loads(text, strict=False)

    parsers = {'general': parse_general, 'images': parse_images, 'it': parse_it}
    return parsers[baidu_category](data)
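

# Web-search results: the entry list lives under feed.entry, entries without a
# title or URL are skipped, and `abs` carries the text snippet.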
def parse_general(data):
    results = []
    if not data.get("feed", {}).get("entry"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["feed"]["entry"]:
        if not entry.get("title") or not entry.get("url"):
            continue

        published_date = None
        if entry.get("time"):
            try:
                published_date = datetime.fromtimestamp(entry["time"])
            except (ValueError, TypeError):
                published_date = None

        results.append(
            {
                "title": entry["title"],
                "url": entry["url"],
                "content": entry.get("abs", ""),
                "publishedDate": published_date,
            }
        )
    return results
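

# Image results: `replaceUrl[0]` holds the source page URL (FromURL) and the
# image URL (ObjURL); `bdImgnewsDate` is a "%Y-%m-%d %H:%M" timestamp.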
def parse_images(data):
    results = []
    if "data" in data:
        for item in data["data"]:
            if not item:
                # the last item in the JSON list is empty, the JSON string ends with "}, {}]"
                continue

            replace_url = item.get("replaceUrl", [{}])[0]
            width = item.get("width")
            height = item.get("height")

            published_date = None
            img_date = item.get("bdImgnewsDate")
            if img_date:
                # guard against malformed date strings, mirroring parse_general
                try:
                    published_date = datetime.strptime(img_date, "%Y-%m-%d %H:%M")
                except (ValueError, TypeError):
                    published_date = None

            results.append(
                {
                    "template": "images.html",
                    "url": replace_url.get("FromURL"),
                    "thumbnail_src": item.get("thumbURL"),
                    "img_src": replace_url.get("ObjURL"),
                    "title": html_to_text(item.get("fromPageTitle")),
                    "source": item.get("fromURLHost"),
                    "resolution": f"{width} x {height}",
                    "img_format": item.get("type"),
                    "filesize": item.get("filesize"),
                    "publishedDate": published_date,
                }
            )
    return results
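

# Developer-search (kaifa.baidu.com) results: every entry wraps its fields in
# `techDocDigest`.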
def parse_it(data):
    results = []
    if not data.get("data", {}).get("documents", {}).get("data"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["documents"]["data"]:
        results.append(
            {
                'title': entry["techDocDigest"]["title"],
                'url': entry["techDocDigest"]["url"],
                'content': entry["techDocDigest"]["summary"],
            }
        )
    return results
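

# A minimal sketch of how this engine could be enabled in SearXNG's
# settings.yml (the name/shortcut values below are illustrative assumptions):
#
#   - name: baidu images
#     engine: baidu
#     baidu_category: images
#     categories: [images]
#     shortcut: bdi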