Browse Source

[feat] engines: add Naver engine (#4573)

Refactor Naver engine (Web, News, Images, Videos, Autocomplete)

- ref: https://search.naver.com/
- lang: `ko`
- Wikidata: https://www.wikidata.org/wiki/Q485639

Co-authored-by: Bnyro <bnyro@tutanota.com>
Zhijie He 1 day ago
parent
commit
156d1eb8c8
5 changed files with 256 additions and 18 deletions
  1. 1 0
      docs/admin/settings/settings_search.rst
  2. 16 0
      searx/autocomplete.py
  3. 210 0
      searx/engines/naver.py
  4. 24 18
      searx/settings.yml
  5. 5 0
      searx/utils.py

+ 1 - 0
docs/admin/settings/settings_search.rst

@@ -41,6 +41,7 @@
   - ``duckduckgo``
   - ``google``
   - ``mwmbl``
+  - ``naver``
   - ``quark``
   - ``qwant``
   - ``seznam``

+ 16 - 0
searx/autocomplete.py

@@ -149,6 +149,21 @@ def mwmbl(query, _lang):
     return [result for result in results if not result.startswith("go: ") and not result.startswith("search: ")]
 
 
+def naver(query, _lang):
+    # Naver search autocompleter — queries the suggestion endpoint used by
+    # search.naver.com. The language argument is ignored: Naver is ko-only
+    # (see the engine's ``about`` metadata).
+    url = f"https://ac.search.naver.com/nx/ac?{urlencode({'q': query, 'r_format': 'json', 'st': 0})}"
+    response = get(url)
+
+    results = []
+
+    if response.ok:
+        data = response.json()
+        if data.get('items'):
+            # 'items' holds suggestion groups; each entry of the first group
+            # is itself a list whose first element is the suggestion string.
+            # NOTE(review): assumed API shape — confirm against the endpoint.
+            for item in data['items'][0]:
+                results.append(item[0])
+    return results
+
+
 def qihu360search(query, _lang):
     # 360Search search autocompleter
     url = f"https://sug.so.360.cn/suggest?{urlencode({'format': 'json', 'word': query})}"
@@ -300,6 +315,7 @@ backends = {
     'duckduckgo': duckduckgo,
     'google': google_complete,
     'mwmbl': mwmbl,
+    'naver': naver,
     'quark': quark,
     'qwant': qwant,
     'seznam': seznam,

+ 210 - 0
searx/engines/naver.py

@@ -0,0 +1,210 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# pylint: disable=line-too-long
+"""Naver for SearXNG"""
+
+from urllib.parse import urlencode
+from lxml import html
+
+from searx.exceptions import SearxEngineAPIException, SearxEngineXPathException
+from searx.result_types import EngineResults, MainResult
+from searx.utils import (
+    eval_xpath_getindex,
+    eval_xpath_list,
+    eval_xpath,
+    extract_text,
+    extr,
+    html_to_text,
+    parse_duration_string,
+    js_variable_to_python,
+)
+
+# engine metadata
+about = {
+    "website": "https://search.naver.com",
+    "wikidata_id": "Q485639",
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": "HTML",
+    "language": "ko",
+}
+
+# categories are assigned per engine instance in settings.yml
+categories = []
+paging = True
+
+# map SearXNG time-range names to Naver's "nso=p:<code>" period codes
+time_range_support = True
+time_range_dict = {"day": "1d", "week": "1w", "month": "1m", "year": "1y"}
+
+base_url = "https://search.naver.com"
+
+naver_category = "general"
+"""Naver supports general, images, news, videos search.
+
+- ``general``: search for general
+- ``images``: search for images
+- ``news``: search for news
+- ``videos``: search for videos
+"""
+
+# Naver cannot set the number of results on one page, set default value for paging
+# ("start" is the fixed per-page result count of each category, used to compute
+# the page offset in request(); "where" selects the site section).
+naver_category_dict = {
+    "general": {
+        "start": 15,
+        "where": "web",
+    },
+    "images": {
+        "start": 50,
+        "where": "image",
+    },
+    "news": {
+        "start": 10,
+        "where": "news",
+    },
+    "videos": {
+        "start": 48,
+        "where": "video",
+    },
+}
+
+
+def init(_):
+    # Engine-setup hook: fail at startup on a misconfigured ``naver_category``
+    # instead of raising a KeyError later inside response().
+    if naver_category not in ('general', 'images', 'news', 'videos'):
+        raise SearxEngineAPIException(f"Unsupported category: {naver_category}")
+
+
+def request(query, params):
+    """Assemble the search URL for the configured Naver category.
+
+    Paging is emulated via the ``start`` offset: Naver's page size is fixed
+    per category (see ``naver_category_dict``), so page N starts at
+    ``(N - 1) * page_size + 1``.
+    """
+    query_params = {
+        "query": query,
+    }
+
+    if naver_category in naver_category_dict:
+        query_params["start"] = (params["pageno"] - 1) * naver_category_dict[naver_category]["start"] + 1
+        query_params["where"] = naver_category_dict[naver_category]["where"]
+
+    if params["time_range"] in time_range_dict:
+        # restrict results to the requested period, e.g. "nso=p:1w"
+        query_params["nso"] = f"p:{time_range_dict[params['time_range']]}"
+
+    params["url"] = f"{base_url}/search.naver?{urlencode(query_params)}"
+    return params
+
+
+def response(resp) -> EngineResults:
+    # Dispatch to the parser for the configured category; init() guarantees
+    # ``naver_category`` is one of these keys, so no KeyError can occur here.
+    parsers = {'general': parse_general, 'images': parse_images, 'news': parse_news, 'videos': parse_videos}
+
+    return parsers[naver_category](resp.text)
+
+
+def parse_general(data):
+    """Parse the HTML of a "web" (general) result page into MainResult items."""
+    results = EngineResults()
+
+    dom = html.fromstring(data)
+
+    for item in eval_xpath_list(dom, "//ul[contains(@class, 'lst_total')]/li[contains(@class, 'bx')]"):
+        # the thumbnail is optional — a missing image or odd markup must not
+        # drop the whole result, hence the broad except/pass
+        thumbnail = None
+        try:
+            thumbnail = eval_xpath_getindex(item, ".//div[contains(@class, 'thumb_single')]//img/@data-lazysrc", 0)
+        except (ValueError, TypeError, SearxEngineXPathException):
+            pass
+
+        results.add(
+            MainResult(
+                title=extract_text(eval_xpath(item, ".//a[contains(@class, 'link_tit')]")),
+                url=eval_xpath_getindex(item, ".//a[contains(@class, 'link_tit')]/@href", 0),
+                content=extract_text(
+                    eval_xpath(item, ".//div[contains(@class, 'total_dsc_wrap')]//a[contains(@class, 'api_txt_lines')]")
+                ),
+                thumbnail=thumbnail,
+            )
+        )
+
+    return results
+
+
+def parse_images(data):
+    """Parse image results embedded as a JavaScript variable in the page.
+
+    The image tab does not ship its results as plain HTML: they are inlined
+    as ``var imageSearchTabData = {...}`` inside a <script> tag, hence the
+    extr() + js_variable_to_python() round trip instead of XPath.
+    """
+    results = []
+
+    match = extr(data, '<script>var imageSearchTabData=', '</script>')
+    if match:
+        json = js_variable_to_python(match.strip())
+        items = json.get('content', {}).get('items', [])
+
+        for item in items:
+            results.append(
+                {
+                    "template": "images.html",
+                    "url": item.get('link'),
+                    "thumbnail_src": item.get('thumb'),
+                    "img_src": item.get('originalUrl'),
+                    "title": html_to_text(item.get('title')),
+                    "source": item.get('source'),
+                    # NOTE(review): orgWidth/orgHeight may be absent, which
+                    # would render "None x None" — confirm against live data.
+                    "resolution": f"{item.get('orgWidth')} x {item.get('orgHeight')}",
+                }
+            )
+
+    return results
+
+
+def parse_news(data):
+    """Parse the HTML of a news result page into MainResult items.
+
+    NOTE(review): the ``sds-comps-*`` class names look like generated design-
+    system classes and may change without notice — brittle selectors to keep
+    an eye on.
+    """
+    results = EngineResults()
+    dom = html.fromstring(data)
+
+    for item in eval_xpath_list(
+        dom, "//div[contains(@class, 'sds-comps-base-layout') and contains(@class, 'sds-comps-full-layout')]"
+    ):
+        title = extract_text(eval_xpath(item, ".//span[contains(@class, 'sds-comps-text-type-headline1')]/text()"))
+
+        url = eval_xpath_getindex(item, ".//a[@href and @nocr='1']/@href", 0)
+
+        content = extract_text(eval_xpath(item, ".//span[contains(@class, 'sds-comps-text-type-body1')]"))
+
+        # thumbnail is optional — parsing failures must not drop the result
+        thumbnail = None
+        try:
+            thumbnail = eval_xpath_getindex(
+                item,
+                ".//div[contains(@class, 'sds-comps-image') and contains(@class, 'sds-rego-thumb-overlay')]//img[@src]/@src",
+                0,
+            )
+        except (ValueError, TypeError, SearxEngineXPathException):
+            pass
+
+        # the layout div also matches non-result containers; requiring all
+        # three fields filters those out
+        if title and content and url:
+            results.add(
+                MainResult(
+                    title=title,
+                    url=url,
+                    content=content,
+                    thumbnail=thumbnail,
+                )
+            )
+
+    return results
+
+
+def parse_videos(data):
+    """Parse the HTML of a video result page into videos.html result dicts."""
+    results = []
+
+    dom = html.fromstring(data)
+
+    for item in eval_xpath_list(dom, "//li[contains(@class, 'video_item')]"):
+        # thumbnail is optional — a missing image must not drop the result
+        thumbnail = None
+        try:
+            thumbnail = eval_xpath_getindex(item, ".//img[contains(@class, 'thumb')]/@src", 0)
+        except (ValueError, TypeError, SearxEngineXPathException):
+            pass
+
+        # duration is optional as well; parse_duration_string converts the
+        # "mm:ss" style text, and unparsable text simply leaves length unset
+        length = None
+        try:
+            length = parse_duration_string(extract_text(eval_xpath(item, ".//span[contains(@class, 'time')]")))
+        except (ValueError, TypeError):
+            pass
+
+        results.append(
+            {
+                "template": "videos.html",
+                "title": extract_text(eval_xpath(item, ".//a[contains(@class, 'info_title')]")),
+                "url": eval_xpath_getindex(item, ".//a[contains(@class, 'info_title')]/@href", 0),
+                "thumbnail": thumbnail,
+                'length': length,
+            }
+        )
+
+    return results

+ 24 - 18
searx/settings.yml

@@ -34,7 +34,7 @@ search:
   # Filter results. 0: None, 1: Moderate, 2: Strict
   safe_search: 0
   # Existing autocomplete backends: "360search", "baidu", "brave", "dbpedia", "duckduckgo", "google", "yandex",
-  # "mwmbl", "seznam", "sogou", "stract", "swisscows", "quark", "qwant", "wikipedia" -
+  # "mwmbl", "naver", "seznam", "sogou", "stract", "swisscows", "quark", "qwant", "wikipedia" -
   # leave blank to turn it off by default.
   autocomplete: ""
   # minimun characters to type before autocompleter starts
@@ -2360,25 +2360,31 @@ engines:
     disabled: true
 
   - name: naver
-    shortcut: nvr
     categories: [general, web]
-    engine: xpath
-    paging: true
-    search_url: https://search.naver.com/search.naver?where=webkr&sm=osp_hty&ie=UTF-8&query={query}&start={pageno}
-    url_xpath: //a[@class="link_tit"]/@href
-    title_xpath: //a[@class="link_tit"]
-    content_xpath: //div[@class="total_dsc_wrap"]/a
-    first_page_num: 1
-    page_size: 10
+    engine: naver
+    shortcut: nvr
+    disabled: true
+
+  - name: naver images
+    naver_category: images
+    categories: [images]
+    engine: naver
+    shortcut: nvri
+    disabled: true
+
+  - name: naver news
+    naver_category: news
+    categories: [news]
+    engine: naver
+    shortcut: nvrn
+    disabled: true
+
+  - name: naver videos
+    naver_category: videos
+    categories: [videos]
+    engine: naver
+    shortcut: nvrv
     disabled: true
-    about:
-      website: https://www.naver.com/
-      wikidata_id: Q485639
-      official_api_documentation: https://developers.naver.com/docs/nmt/examples/
-      use_official_api: false
-      require_api_key: false
-      results: HTML
-      language: ko
 
   - name: rubygems
     shortcut: rbg

+ 5 - 0
searx/utils.py

@@ -830,6 +830,11 @@ def js_variable_to_python(js_variable):
     s = _JS_DECIMAL_RE.sub(":0.", s)
     # replace the surogate character by colon
     s = s.replace(chr(1), ':')
+    # replace single-quote followed by comma with double-quote and comma
+    # {"a": "\"12\"',"b": "13"}
+    # becomes
+    # {"a": "\"12\"","b": "13"}
+    s = s.replace("',", "\",")
     # load the JSON and return the result
     return json.loads(s)