# SPDX-License-Identifier: AGPL-3.0-or-later
"""Baidu_

.. _Baidu: https://www.baidu.com
"""
# There exists a https://github.com/ohblue/baidu-serp-api/
# but we don't use it here (maybe we can learn from it).

from urllib.parse import urlencode
from datetime import datetime
from html import unescape
import time
import json

from searx.exceptions import SearxEngineAPIException
from searx.utils import html_to_text

about = {
    "website": "https://www.baidu.com",
    "wikidata_id": "Q14772",
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": "JSON",
    "language": "zh",
}
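
# engine dependent config: ``baidu_category`` selects which Baidu service is
# queried; it must be one of 'general', 'images' or 'it' (validated in ``init``)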
paging = True
categories = []
results_per_page = 10
baidu_category = 'general'

time_range_support = True
time_range_dict = {"day": 86400, "week": 604800, "month": 2592000, "year": 31536000}
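

# ``init`` is called once when SearXNG sets up the engine; failing here keeps
# a misconfigured ``baidu_category`` from surfacing only at query time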
def init(_):
    if baidu_category not in ('general', 'images', 'it'):
        raise SearxEngineAPIException(f"Unsupported category: {baidu_category}")


def request(query, params):
    page_num = params["pageno"]

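    # each category talks to a different endpoint with its own parameter
    # names; 'general' and 'images' paginate by result offset (``pn``), while
    # the 'it' endpoint is page-number based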
    category_config = {
        'general': {
            'endpoint': 'https://www.baidu.com/s',
            'params': {
                "wd": query,
                "rn": results_per_page,
                "pn": (page_num - 1) * results_per_page,
                "tn": "json",
            },
        },
        'images': {
            'endpoint': 'https://image.baidu.com/search/acjson',
            'params': {
                "word": query,
                "rn": results_per_page,
                "pn": (page_num - 1) * results_per_page,
                "tn": "resultjson_com",
            },
        },
        'it': {
            'endpoint': 'https://kaifa.baidu.com/rest/v1/search',
            'params': {
                "wd": query,
                "pageSize": results_per_page,
                "pageNum": page_num,
                "paramList": f"page_num={page_num},page_size={results_per_page}",
                "position": 0,
            },
        },
    }

    query_params = category_config[baidu_category]['params']
    query_url = category_config[baidu_category]['endpoint']

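    # time range filtering: 'general' takes an epoch interval in the ``gpc``
    # parameter (``stf=<past>,<now>|stftype=1``), 'it' appends a
    # ``timestamp_range`` to ``paramList``; 'images' offers no such filter here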
    if params.get("time_range") in time_range_dict:
        now = int(time.time())
        past = now - time_range_dict[params["time_range"]]

        if baidu_category == 'general':
            query_params["gpc"] = f"stf={past},{now}|stftype=1"

        if baidu_category == 'it':
            query_params["paramList"] += f",timestamp_range={past}-{now}"

    params["url"] = f"{query_url}?{urlencode(query_params)}"
    return params


def response(resp):
    text = resp.text
    if baidu_category == 'images':
        # baidu's JSON encoder escapes / as \/ and ' as \'; the latter is not
        # valid JSON, so normalize both before parsing
        text = text.replace(r"\/", "/").replace(r"\'", "'")
    data = json.loads(text, strict=False)
    parsers = {'general': parse_general, 'images': parse_images, 'it': parse_it}
    return parsers[baidu_category](data)
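

# parse the ``tn=json`` response of the general web search; entries live under
# ``feed.entry`` and carry an epoch ``time`` plus an ``abs`` (abstract) field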
def parse_general(data):
    results = []
    if not data.get("feed", {}).get("entry"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["feed"]["entry"]:
        if not entry.get("title") or not entry.get("url"):
            continue

        published_date = None
        if entry.get("time"):
            try:
                published_date = datetime.fromtimestamp(entry["time"])
            except (ValueError, TypeError):
                published_date = None

        # title and content sometimes contain HTML entities such as &amp;, &#39;, &quot;
        title = unescape(entry["title"])
        content = unescape(entry.get("abs", ""))

        results.append(
            {
                "title": title,
                "url": entry["url"],
                "content": content,
                "publishedDate": published_date,
            }
        )
    return results
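

# parse the image search response; each ``data`` item carries its original
# source in ``replaceUrl[0]`` (``FromURL`` / ``ObjURL``)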
def parse_images(data):
    results = []
    if "data" in data:
        for item in data["data"]:
            if not item:
                # the last item in the JSON list is empty, the JSON string ends with "}, {}]"
                continue
            replace_url = item.get("replaceUrl", [{}])[0]
            width = item.get("width")
            height = item.get("height")
            img_date = item.get("bdImgnewsDate")
            publishedDate = None
            if img_date:
                publishedDate = datetime.strptime(img_date, "%Y-%m-%d %H:%M")
            results.append(
                {
                    "template": "images.html",
                    "url": replace_url.get("FromURL"),
                    "thumbnail_src": item.get("thumbURL"),
                    "img_src": replace_url.get("ObjURL"),
                    "title": html_to_text(item.get("fromPageTitle")),
                    "source": item.get("fromURLHost"),
                    "resolution": f"{width} x {height}",
                    "img_format": item.get("type"),
                    "filesize": item.get("filesize"),
                    "publishedDate": publishedDate,
                }
            )
    return results
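

# parse the 'it' category response (kaifa.baidu.com, Baidu's developer/tech
# doc search); every document wraps its fields in a ``techDocDigest`` object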
def parse_it(data):
    results = []
    if not data.get("data", {}).get("documents", {}).get("data"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["documents"]["data"]:
        results.append(
            {
                'title': entry["techDocDigest"]["title"],
                'url': entry["techDocDigest"]["url"],
                'content': entry["techDocDigest"]["summary"],
            }
        )
    return results
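

# Minimal manual smoke test: a sketch, not part of the engine contract. It
# assumes this file sits in a SearXNG checkout (so ``searx`` is importable)
# and that ``requests`` is installed; Baidu may still reject clients without
# browser-like headers, in which case ``response`` raises.
if __name__ == "__main__":
    import requests

    req_params = request("searxng", {"pageno": 1})
    resp = requests.get(req_params["url"], timeout=10)
    for res in response(resp)[:3]:
        print(res["title"], "-", res["url"])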