# SPDX-License-Identifier: AGPL-3.0-or-later
"""Baidu_

.. _Baidu: https://www.baidu.com
"""

# There exists https://github.com/ohblue/baidu-serp-api/,
# but we don't use it here (maybe we can learn from it).

from urllib.parse import urlencode
from datetime import datetime
import time
import json

from searx.exceptions import SearxEngineAPIException
from searx.utils import html_to_text

about = {
    "website": "https://www.baidu.com",
    "wikidata_id": "Q14772",
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": "JSON",
    "language": "zh",
}
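
# Engine configuration: ``baidu_category`` selects which Baidu vertical this
# engine instance queries ('general', 'images' or 'it'); each vertical has its
# own endpoint, parameter names and result parser below.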
paging = True
categories = []
results_per_page = 10
baidu_category = 'general'

time_range_support = True
time_range_dict = {"day": 86400, "week": 604800, "month": 2592000, "year": 31536000}


def init(_):
    if baidu_category not in ('general', 'images', 'it'):
        raise SearxEngineAPIException(f"Unsupported category: {baidu_category}")


def request(query, params):
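    """Build the request URL for the configured ``baidu_category``.

    Each vertical uses its own endpoint and parameter names; with the defaults
    above, page 2 of a 'general' search for "firefox" would produce a URL of
    the form ``https://www.baidu.com/s?wd=firefox&rn=10&pn=10&tn=json``.
    """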
    page_num = params["pageno"]

    category_config = {
        'general': {
            'endpoint': 'https://www.baidu.com/s',
            'params': {
                "wd": query,
                "rn": results_per_page,
                "pn": (page_num - 1) * results_per_page,
                "tn": "json",
            },
        },
        'images': {
            'endpoint': 'https://image.baidu.com/search/acjson',
            'params': {
                "word": query,
                "rn": results_per_page,
                "pn": (page_num - 1) * results_per_page,
                "tn": "resultjson_com",
            },
        },
        'it': {
            'endpoint': 'https://kaifa.baidu.com/rest/v1/search',
            'params': {
                "wd": query,
                "pageSize": results_per_page,
                "pageNum": page_num,
                "paramList": f"page_num={page_num},page_size={results_per_page}",
                "position": 0,
            },
        },
    }
    query_params = category_config[baidu_category]['params']
    query_url = category_config[baidu_category]['endpoint']

    if params.get("time_range") in time_range_dict:
        now = int(time.time())
        past = now - time_range_dict[params["time_range"]]

        if baidu_category == 'general':
            query_params["gpc"] = f"stf={past},{now}|stftype=1"

        if baidu_category == 'it':
            query_params["paramList"] += f",timestamp_range={past}-{now}"

    params["url"] = f"{query_url}?{urlencode(query_params)}"
    return params


def response(resp):
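    """Parse the JSON payload and dispatch to the per-category parser."""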
    try:
        data = json.loads(resp.text, strict=False)
    except Exception as e:
        raise SearxEngineAPIException(f"Invalid response: {e}") from e

    parsers = {'general': parse_general, 'images': parse_images, 'it': parse_it}
    return parsers[baidu_category](data)


def parse_general(data):
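    """Extract web results from the ``feed.entry`` list of the /s endpoint."""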
    results = []
    if not data.get("feed", {}).get("entry"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["feed"]["entry"]:
        if not entry.get("title") or not entry.get("url"):
            continue

        published_date = None
        if entry.get("time"):
            try:
                published_date = datetime.fromtimestamp(entry["time"])
            except (ValueError, TypeError):
                published_date = None

        results.append(
            {
                "title": entry["title"],
                "url": entry["url"],
                "content": entry.get("abs", ""),
                "publishedDate": published_date,
            }
        )
    return results


def parse_images(data):
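    """Extract image results; the image URLs arrive with escaped slashes."""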
    results = []
    if "data" in data:
        for item in data["data"]:
            # Guard against a missing or empty "replaceUrl" list, and
            # unescape the JSON-escaped slashes in the URLs.
            replace_url = (item.get("replaceUrl") or [{}])[0]
            from_url = replace_url.get("FromURL", "").replace("\\/", "/")
            img_src = replace_url.get("ObjURL", "").replace("\\/", "/")

            results.append(
                {
                    "template": "images.html",
                    "url": from_url,
                    "thumbnail_src": item.get("thumbURL", ""),
                    "img_src": img_src,
                    "content": html_to_text(item.get("fromPageTitleEnc", "")),
                    "title": html_to_text(item.get("fromPageTitle", "")),
                    "source": item.get("fromURLHost", ""),
                }
            )
    return results


def parse_it(data):
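    """Extract developer-documentation results from the kaifa.baidu.com API."""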
    results = []
    if not data.get("data", {}).get("documents", {}).get("data"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["documents"]["data"]:
        results.append(
            {
                'title': entry["techDocDigest"]["title"],
                'url': entry["techDocDigest"]["url"],
                'content': entry["techDocDigest"]["summary"],
            }
        )
    return results
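

# A minimal smoke-test sketch, not part of the engine contract: SearXNG
# normally drives ``request``/``response`` itself. It assumes only that the
# response object exposes a ``.text`` attribute, and the canned 'general'
# payload below is shaped after ``parse_general`` above, not captured from a
# real Baidu response.
if __name__ == "__main__":
    from types import SimpleNamespace

    # Build the request parameters the way SearXNG would for page 1.
    built = request("firefox", {"pageno": 1})
    print("would fetch:", built["url"])

    # Feed a hand-made payload through the parser chain.
    fake_payload = {"feed": {"entry": [{"title": "t", "url": "https://example.com", "abs": "summary"}]}}
    fake_resp = SimpleNamespace(text=json.dumps(fake_payload))
    for result in response(fake_resp):
        print(result)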