360search.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=invalid-name
"""360Search search engine for SearXNG"""

from urllib.parse import urlencode

from lxml import html

from searx.utils import extract_text

# Metadata
about = {
    "website": "https://www.so.com/",
    "wikidata_id": "Q10846064",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

# Engine configuration
categories = ["general"]
paging = True
time_range_support = True
time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}

# Base URL
base_url = "https://www.so.com"


def request(query, params):
    query_params = {
        "pn": params["pageno"],
        "q": query,
    }

    # Map SearXNG's time-range filter onto 360 Search's "adv_t" parameter.
    time_range = time_range_dict.get(params['time_range'])
    if time_range:
        query_params["adv_t"] = time_range

    params["url"] = f"{base_url}/s?{urlencode(query_params)}"
    return params


def response(resp):
    dom = html.fromstring(resp.text)
    results = []

    for item in dom.xpath('//li[contains(@class, "res-list")]'):
        title = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a'))

        # Take the result URL from data-mdurl if present, otherwise fall back
        # to the plain href of the title link.
        url = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a/@data-mdurl'))
        if not url:
            url = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a/@href'))

        # Result snippets appear in either a description paragraph or a
        # summary span, depending on the result type.
        content = extract_text(item.xpath('.//p[@class="res-desc"]'))
        if not content:
            content = extract_text(item.xpath('.//span[@class="res-list-summary"]'))

        if title and url:
            results.append(
                {
                    "title": title,
                    "url": url,
                    "content": content,
                }
            )

    return results
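
# ---------------------------------------------------------------------------
# Ad-hoc smoke test (not part of the engine): a minimal sketch for exercising
# request() and response() outside a running SearXNG instance. It assumes the
# `searx` package is importable (for extract_text) and that `requests` is
# installed; the query string and User-Agent below are illustrative only.
if __name__ == "__main__":
    import requests

    test_params = {"pageno": 1, "time_range": None}
    test_params = request("searxng", test_params)
    print("GET", test_params["url"])

    # A browser-like User-Agent helps avoid trivial bot blocking.
    resp = requests.get(
        test_params["url"],
        headers={"User-Agent": "Mozilla/5.0"},
        timeout=10,
    )
    for result in response(resp):
        print(result["title"], "->", result["url"])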