Merge pull request #296 from return42/engine-logger

one logger per engine

Author: Alexandre Flament (3 years ago)
Commit: 17e739a859

+ 2 - 1
manage

@@ -37,7 +37,7 @@ PYLINT_SEARX_DISABLE_OPTION="\
 I,C,R,\
 W0105,W0212,W0511,W0603,W0613,W0621,W0702,W0703,W1401,\
 E1136"
-PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES="supported_languages,language_aliases"
+PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES="supported_languages,language_aliases,logger"
 PYLINT_OPTIONS="-m pylint -j 0 --rcfile .pylintrc"

 help() {
@@ -588,6 +588,7 @@ test.pylint() {
     (   set -e
         build_msg TEST "[pylint] \$PYLINT_FILES"
         pyenv.cmd python ${PYLINT_OPTIONS} ${PYLINT_VERBOSE} \
+            --additional-builtins="${PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES}" \
             "${PYLINT_FILES[@]}"

         build_msg TEST "[pylint] searx/engines"

+ 1 - 0
searx/engines/__init__.py

@@ -111,6 +111,7 @@ def load_engine(engine_data):
     if is_missing_required_attributes(engine):
         return None

+    engine.logger = logger.getChild(engine_name)
     return engine


+ 0 - 3
searx/engines/apkmirror.py

@@ -8,15 +8,12 @@
 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath_list,
     eval_xpath_getindex,
     extract_text,
 )

-logger = logger.getChild('APKMirror engine')
-
 about = {
     "website": 'https://www.apkmirror.com',
     "wikidata_id": None,

+ 0 - 3
searx/engines/artic.py

@@ -13,9 +13,6 @@ Explore thousands of artworks from The Art Institute of Chicago.
 from json import loads
 from urllib.parse import urlencode

-from searx import logger
-logger = logger.getChild('APKMirror engine')
-
 about = {
     "website": 'https://www.artic.edu',
     "wikidata_id": 'Q239303',

+ 0 - 3
searx/engines/bing.py

@@ -6,11 +6,8 @@
 import re
 from urllib.parse import urlencode
 from lxml import html
-from searx import logger
 from searx.utils import eval_xpath, extract_text, match_language

-logger = logger.getChild('bing engine')
-
 # about
 about = {
     "website": 'https://www.bing.com',

+ 0 - 5
searx/engines/core.py

@@ -9,11 +9,8 @@ from json import loads
 from datetime import datetime
 from urllib.parse import urlencode

-from searx import logger
 from searx.exceptions import SearxEngineAPIException

-logger = logger.getChild('CORE engine')
-
 about = {
     "website": 'https://core.ac.uk',
     "wikidata_id": 'Q22661180',
@@ -29,8 +26,6 @@ nb_per_page = 10

 api_key = 'unset'

-logger = logger.getChild('CORE engine')
-
 base_url = 'https://core.ac.uk:443/api-v2/search/'
 search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'


+ 0 - 3
searx/engines/duckduckgo_definitions.py

@@ -9,15 +9,12 @@ import json
 from urllib.parse import urlencode, urlparse, urljoin
 from lxml import html

-from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.engines.duckduckgo import language_aliases
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom

-logger = logger.getChild('duckduckgo_definitions')
-
 # about
 about = {
     "website": 'https://duckduckgo.com/',

+ 0 - 3
searx/engines/flickr_noapi.py

@@ -7,11 +7,8 @@ from json import loads
 from time import time
 import re
 from urllib.parse import urlencode
-from searx.engines import logger
 from searx.utils import ecma_unescape, html_to_text

-logger = logger.getChild('flickr-noapi')
-
 # about
 about = {
     "website": 'https://www.flickr.com',

+ 0 - 3
searx/engines/genius.py

@@ -9,9 +9,6 @@ from json import loads
 from urllib.parse import urlencode
 from datetime import datetime

-from searx import logger
-logger = logger.getChild('genius engine')
-
 # about
 about = {
     "website": 'https://genius.com/',

+ 0 - 1
searx/engines/gigablast.py

@@ -8,7 +8,6 @@
 import re
 from json import loads
 from urllib.parse import urlencode
-# from searx import logger
 from searx.network import get

 # about

+ 0 - 3
searx/engines/google.py

@@ -29,12 +29,9 @@ The google WEB engine itself has a special setup option:

 from urllib.parse import urlencode
 from lxml import html
-from searx import logger
 from searx.utils import match_language, extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
 from searx.exceptions import SearxEngineCaptchaException

-logger = logger.getChild('google engine')
-
 # about
 about = {
     "website": 'https://www.google.com',

+ 0 - 3
searx/engines/google_images.py

@@ -16,7 +16,6 @@
 from urllib.parse import urlencode, unquote
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -37,8 +36,6 @@ from searx.engines.google import (
 )
 # pylint: enable=unused-import

-logger = logger.getChild('google images')
-
 # about
 about = {
     "website": 'https://images.google.com',

+ 0 - 3
searx/engines/google_news.py

@@ -20,7 +20,6 @@ from urllib.parse import urlencode
 from base64 import b64decode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -50,8 +49,6 @@ about = {
     "results": 'HTML',
 }

-logger = logger.getChild('google news')
-
 # compared to other google engines google-news has a different time range
 # support.  The time range is included in the search term.
 time_range_dict = {

+ 0 - 3
searx/engines/google_scholar.py

@@ -14,7 +14,6 @@ Definitions`_.
 from urllib.parse import urlencode
 from datetime import datetime
 from lxml import html
-from searx import logger

 from searx.utils import (
     eval_xpath,
@@ -53,8 +52,6 @@ use_locale_domain = True
 time_range_support = True
 safesearch = False

-logger = logger.getChild('google scholar')
-
 def time_range_url(params):
     """Returns a URL query component for a google-Scholar time range based on
     ``params['time_range']``.  Google-Scholar does only support ranges in years.

+ 0 - 3
searx/engines/google_videos.py

@@ -20,7 +20,6 @@ import re
 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -59,8 +58,6 @@ about = {
     "results": 'HTML',
 }

-logger = logger.getChild('google video')
-
 # engine dependent config

 categories = ['videos']

+ 0 - 3
searx/engines/solidtorrents.py

@@ -8,9 +8,6 @@

 from json import loads
 from urllib.parse import urlencode
-from searx import logger
-
-logger = logger.getChild('solidtor engine')

 about = {
     "website": 'https://www.solidtorrents.net/',

+ 0 - 1
searx/engines/soundcloud.py

@@ -8,7 +8,6 @@ from json import loads
 from lxml import html
 from dateutil import parser
 from urllib.parse import quote_plus, urlencode
-from searx import logger
 from searx.network import get as http_get

 # about

+ 0 - 3
searx/engines/springer.py

@@ -10,11 +10,8 @@ from datetime import datetime
 from json import loads
 from urllib.parse import urlencode

-from searx import logger
 from searx.exceptions import SearxEngineAPIException

-logger = logger.getChild('Springer Nature engine')
-
 about = {
     "website": 'https://www.springernature.com/',
     "wikidata_id": 'Q21096327',

+ 0 - 5
searx/engines/sqlite.py

@@ -9,11 +9,6 @@
 import sqlite3
 import contextlib

-from searx import logger
-
-
-logger = logger.getChild('SQLite engine')
-
 engine_type = 'offline'
 database = ""
 query_str = ""

+ 0 - 3
searx/engines/unsplash.py

@@ -8,9 +8,6 @@
 from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
 from json import loads

-from searx import logger
-
-logger = logger.getChild('unsplash engine')
 # about
 about = {
     "website": 'https://unsplash.com',

+ 0 - 3
searx/engines/wikidata.py

@@ -10,15 +10,12 @@ from json import loads
 from dateutil.parser import isoparse
 from babel.dates import format_datetime, format_date, format_time, get_datetime_format

-from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.network import post, get
 from searx.utils import match_language, searx_useragent, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
 from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import

-logger = logger.getChild('wikidata')
-
 # about
 about = {
     "website": 'https://wikidata.org/',

+ 0 - 3
searx/engines/wordnik.py

@@ -4,12 +4,9 @@
 """

 from lxml.html import fromstring
-from searx import logger
 from searx.utils import extract_text
 from searx.network import raise_for_httperror

-logger = logger.getChild('Wordnik engine')
-
 # about
 about = {
     "website": 'https://www.wordnik.com',

+ 0 - 3
searx/engines/xpath.py

@@ -23,9 +23,6 @@ from urllib.parse import urlencode

 from lxml import html
 from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
-from searx import logger
-
-logger = logger.getChild('XPath engine')

 search_url = None
 """

+ 0 - 3
searx/engines/yahoo_news.py

@@ -14,7 +14,6 @@ from datetime import datetime, timedelta
 from dateutil import parser
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath_list,
     eval_xpath_getindex,
@@ -23,8 +22,6 @@ from searx.utils import (

 from searx.engines.yahoo import parse_url

-logger = logger.getChild('yahoo_news engine')
-
 # about
 about = {
     "website": 'https://news.yahoo.com',