# SPDX-License-Identifier: AGPL-3.0-or-later
"""Quark (Shenma) search engine for searxng"""

from urllib.parse import urlencode
from datetime import datetime
import re
import json

from searx.utils import html_to_text
from searx.exceptions import SearxEngineAPIException, SearxEngineCaptchaException

# Metadata
about = {
    "website": "https://quark.sm.cn/",
    "wikidata_id": "Q48816502",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
    "language": "zh",
}

# Engine Configuration
categories = []
paging = True
results_per_page = 10
quark_category = 'general'

time_range_support = True
time_range_dict = {'day': '4', 'week': '3', 'month': '2', 'year': '1'}

CAPTCHA_PATTERN = r'\{[^{]*?"action"\s*:\s*"captcha"\s*,\s*"url"\s*:\s*"([^"]+)"[^{]*?\}'
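
# A minimal sketch of settings.yml entries wiring this engine up; the names
# and `shortcut` values are assumptions, not defined by this module:
#
#   - name: quark
#     engine: quark
#     shortcut: qk
#     quark_category: general
#
#   - name: quark images
#     engine: quark
#     shortcut: qki
#     quark_category: images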


def is_alibaba_captcha(html):
    """
    Detects if the response contains an Alibaba X5SEC CAPTCHA page.

    Quark may return a CAPTCHA challenge after 9 requests in a short period.
    Typically, the ban duration is around 15 minutes.
    """
    return bool(re.search(CAPTCHA_PATTERN, html))
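
# Illustrative (assumed) shape of the JSON blob CAPTCHA_PATTERN matches;
# the URL here is made up:
#   {"action": "captcha", "url": "https://x5sec.example/captcha?...", ...}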


def init(_):
    if quark_category not in ('general', 'images'):
        raise SearxEngineAPIException(f"Unsupported category: {quark_category}")


def request(query, params):
    page_num = params["pageno"]

    category_config = {
        'general': {
            'endpoint': 'https://quark.sm.cn/s',
            'params': {
                "q": query,
                "layout": "html",
                "page": page_num,
            },
        },
        'images': {
            'endpoint': 'https://vt.sm.cn/api/pic/list',
            'params': {
                "query": query,
                "limit": results_per_page,
                "start": (page_num - 1) * results_per_page,
            },
        },
    }

    query_params = category_config[quark_category]['params']
    query_url = category_config[quark_category]['endpoint']

    if time_range_dict.get(params['time_range']) and quark_category == 'general':
        query_params["tl_request"] = time_range_dict.get(params['time_range'])

    params["url"] = f"{query_url}?{urlencode(query_params)}"
    return params
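
# For example, a 'general' search for "searxng" on page 2 with a 'week' time
# range builds (illustrative URL, derived from the mapping above):
#   https://quark.sm.cn/s?q=searxng&layout=html&page=2&tl_request=3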


def response(resp):
    results = []
    text = resp.text

    if is_alibaba_captcha(text):
        raise SearxEngineCaptchaException(
            suspended_time=900, message="Alibaba CAPTCHA detected. Please try again later."
        )

    if quark_category == 'images':
        data = json.loads(text)
        for item in data.get('data', {}).get('hit', {}).get('imgInfo', {}).get('item', []):
            try:
                published_date = datetime.fromtimestamp(int(item.get("publish_time")))
            except (ValueError, TypeError):
                published_date = None

            results.append(
                {
                    "template": "images.html",
                    "url": item.get("imgUrl"),
                    "thumbnail_src": item.get("img"),
                    "img_src": item.get("bigPicUrl"),
                    "title": item.get("title"),
                    "source": item.get("site"),
                    "resolution": f"{item['width']} x {item['height']}",
                    "publishedDate": published_date,
                }
            )

    if quark_category == 'general':
        # Quark returns a mix of different sc (source category) values on a
        # single page, depending on the query type.
        source_category_parsers = {
            'addition': parse_addition,
            'ai_page': parse_ai_page,
            'baike_sc': parse_baike_sc,
            'finance_shuidi': parse_finance_shuidi,
            'kk_yidian_all': parse_kk_yidian_all,
            'life_show_general_image': parse_life_show_general_image,
            'med_struct': parse_med_struct,
            'music_new_song': parse_music_new_song,
            'nature_result': parse_nature_result,
            'news_uchq': parse_news_uchq,
            'ss_note': parse_ss_note,
            # ss_kv, ss_pic, ss_text, ss_video, baike and structure_web_novel
            # share the same structure as ss_doc
            'ss_doc': parse_ss_doc,
            'ss_kv': parse_ss_doc,
            'ss_pic': parse_ss_doc,
            'ss_text': parse_ss_doc,
            'ss_video': parse_ss_doc,
            'baike': parse_ss_doc,
            'structure_web_novel': parse_ss_doc,
            'travel_dest_overview': parse_travel_dest_overview,
            'travel_ranking_list': parse_travel_ranking_list,
        }

        pattern = r'<script\s+type="application/json"\s+id="s-data-[^"]+"\s+data-used-by="hydrate">(.*?)</script>'
        matches = re.findall(pattern, text, re.DOTALL)

        for match in matches:
            data = json.loads(match)
            initial_data = data.get('data', {}).get('initialData', {})
            extra_data = data.get('extraData', {})
            source_category = extra_data.get('sc')

            parser = source_category_parsers.get(source_category)
            if parser:
                parsed_results = parser(initial_data)
                if isinstance(parsed_results, list):
                    # Extend if the parser returned a list of results
                    results.extend(parsed_results)
                else:
                    # Append if it returned a single result
                    results.append(parsed_results)

    return results
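
# Each hydration <script> payload consumed above is expected to look roughly
# like this (assumed shape, inferred from the .get() chains in response()):
#   {"data": {"initialData": {...}}, "extraData": {"sc": "ss_doc"}}
# To support a new sc value, add a parser returning a dict (or a list of
# dicts) and register it in source_category_parsers.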


def parse_addition(data):
    return {
        "title": html_to_text(data.get('title', {}).get('content')),
        "url": data.get('source', {}).get('url'),
        "content": html_to_text(data.get('summary', {}).get('content')),
    }


def parse_ai_page(data):
    results = []
    for item in data.get('list', []):
        content = (
            " | ".join(map(str, item.get('content', [])))
            if isinstance(item.get('content'), list)
            else str(item.get('content'))
        )
        try:
            published_date = datetime.fromtimestamp(int(item.get('source', {}).get('time')))
        except (ValueError, TypeError):
            published_date = None

        results.append(
            {
                "title": html_to_text(item.get('title')),
                "url": item.get('url'),
                "content": html_to_text(content),
                "publishedDate": published_date,
            }
        )
    return results


def parse_baike_sc(data):
    return {
        "title": html_to_text(data.get('data', {}).get('title')),
        "url": data.get('data', {}).get('url'),
        "content": html_to_text(data.get('data', {}).get('abstract')),
        "thumbnail": data.get('data', {}).get('img').replace("http://", "https://"),
    }


def parse_finance_shuidi(data):
    content = " | ".join(
        info
        for info in [
            data.get('establish_time'),
            data.get('company_status'),
            data.get('controled_type'),
            data.get('company_type'),
            data.get('capital'),
            data.get('address'),
            data.get('business_scope'),
        ]
        if info
    )
    return {
        "title": html_to_text(data.get('company_name')),
        "url": data.get('title_url'),
        "content": html_to_text(content),
    }


def parse_kk_yidian_all(data):
    content_list = []
    for section in data.get('list_container', []):
        for item in section.get('list_container', []):
            if 'dot_text' in item:
                content_list.append(item['dot_text'])
    return {
        "title": html_to_text(data.get('title')),
        "url": data.get('title_url'),
        "content": html_to_text(' '.join(content_list)),
    }


def parse_life_show_general_image(data):
    results = []
    for item in data.get('image', []):
        try:
            published_date = datetime.fromtimestamp(int(item.get("publish_time")))
        except (ValueError, TypeError):
            published_date = None

        results.append(
            {
                "template": "images.html",
                "url": item.get("imgUrl"),
                "thumbnail_src": item.get("img"),
                "img_src": item.get("bigPicUrl"),
                "title": item.get("title"),
                "source": item.get("site"),
                "resolution": f"{item['width']} x {item['height']}",
                "publishedDate": published_date,
            }
        )
    return results


def parse_med_struct(data):
    return {
        "title": html_to_text(data.get('title')),
        "url": data.get('message', {}).get('statistics', {}).get('nu'),
        "content": html_to_text(data.get('message', {}).get('content_text')),
        "thumbnail": data.get('message', {}).get('video_img').replace("http://", "https://"),
    }


def parse_music_new_song(data):
    results = []
    for item in data.get('hit3', []):
        results.append(
            {
                "title": f"{item['song_name']} | {item['song_singer']}",
                "url": item.get("play_url"),
                "content": html_to_text(item.get("lyrics")),
                "thumbnail": item.get("image_url").replace("http://", "https://"),
            }
        )
    return results


def parse_nature_result(data):
    return {
        "title": html_to_text(data.get('title')),
        "url": data.get('url'),
        "content": html_to_text(data.get('desc')),
    }


def parse_news_uchq(data):
    results = []
    for item in data.get('feed', []):
        try:
            published_date = datetime.strptime(item.get('time'), "%Y-%m-%d")
        except (ValueError, TypeError):
            # Sometimes Quark returns a non-standard format like "1天前"
            # ("1 day ago"); leave published_date as None in that case
            published_date = None

        results.append(
            {
                "title": html_to_text(item.get('title')),
                "url": item.get('url'),
                "content": html_to_text(item.get('summary')),
                "thumbnail": item.get('image').replace("http://", "https://"),
                "publishedDate": published_date,
            }
        )
    return results


def parse_ss_doc(data):
    published_date = None
    try:
        timestamp = int(data.get('sourceProps', {}).get('time'))
        # Sometimes Quark returns 0; leave published_date as None in that case
        if timestamp != 0:
            published_date = datetime.fromtimestamp(timestamp)
    except (ValueError, TypeError):
        pass

    try:
        thumbnail = data.get('picListProps', [])[0].get('src').replace("http://", "https://")
    except (ValueError, TypeError, IndexError):
        thumbnail = None

    return {
        "title": html_to_text(
            data.get('titleProps', {}).get('content')
            # ss_kv variant 1 & 2
            or data.get('title')
        ),
        "url": data.get('sourceProps', {}).get('dest_url')
        # ss_kv variant 1
        or data.get('normal_url')
        # ss_kv variant 2
        or data.get('url'),
        "content": html_to_text(
            data.get('summaryProps', {}).get('content')
            # ss_doc variant 1
            or data.get('message', {}).get('replyContent')
            # ss_kv variant 1
            or data.get('show_body')
            # ss_kv variant 2
            or data.get('desc')
        ),
        "publishedDate": published_date,
        "thumbnail": thumbnail,
    }


def parse_ss_note(data):
    try:
        published_date = datetime.fromtimestamp(int(data.get('source', {}).get('time')))
    except (ValueError, TypeError):
        published_date = None

    return {
        "title": html_to_text(data.get('title', {}).get('content')),
        "url": data.get('source', {}).get('dest_url'),
        "content": html_to_text(data.get('summary', {}).get('content')),
        "publishedDate": published_date,
    }


def parse_travel_dest_overview(data):
    return {
        "title": html_to_text(data.get('strong', {}).get('title')),
        "url": data.get('strong', {}).get('baike_url'),
        "content": html_to_text(data.get('strong', {}).get('baike_text')),
    }


def parse_travel_ranking_list(data):
    return {
        "title": html_to_text(data.get('title', {}).get('text')),
        "url": data.get('title', {}).get('url'),
        "content": html_to_text(data.get('title', {}).get('title_tag')),
    }