Browse Source

[mod] remove unused import

use
from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA
so it is possible to easily remove all unused imports using autoflake:
autoflake --in-place --recursive --remove-all-unused-imports searx tests
Alexandre Flament 4 years ago
parent
commit
3038052c79

+ 1 - 1
searx/engines/acgsou.py

@@ -11,7 +11,7 @@
 
 
 from urllib.parse import urlencode
 from urllib.parse import urlencode
 from lxml import html
 from lxml import html
-from searx.utils import extract_text, get_torrent_size, int_or_zero
+from searx.utils import extract_text, get_torrent_size
 
 
 # engine dependent config
 # engine dependent config
 categories = ['files', 'images', 'videos', 'music']
 categories = ['files', 'images', 'videos', 'music']

+ 0 - 1
searx/engines/arxiv.py

@@ -11,7 +11,6 @@
  More info on api: https://arxiv.org/help/api/user-manual
  More info on api: https://arxiv.org/help/api/user-manual
 """
 """
 
 
-from urllib.parse import urlencode
 from lxml import html
 from lxml import html
 from datetime import datetime
 from datetime import datetime
 
 

+ 2 - 3
searx/engines/bing.py

@@ -16,8 +16,8 @@
 import re
 import re
 from urllib.parse import urlencode
 from urllib.parse import urlencode
 from lxml import html
 from lxml import html
-from searx import logger, utils
-from searx.utils import extract_text, match_language, gen_useragent, eval_xpath
+from searx import logger
+from searx.utils import eval_xpath, extract_text, match_language
 
 
 logger = logger.getChild('bing engine')
 logger = logger.getChild('bing engine')
 
 
@@ -98,7 +98,6 @@ def response(resp):
             result_len = int(result_len_container)
             result_len = int(result_len_container)
     except Exception as e:
     except Exception as e:
         logger.debug('result error :\n%s', e)
         logger.debug('result error :\n%s', e)
-        pass
 
 
     if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
     if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
         return []
         return []

+ 2 - 2
searx/engines/bing_images.py

@@ -15,10 +15,10 @@
 from urllib.parse import urlencode
 from urllib.parse import urlencode
 from lxml import html
 from lxml import html
 from json import loads
 from json import loads
-import re
 from searx.utils import match_language
 from searx.utils import match_language
 
 
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
+from searx.engines.bing import language_aliases
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA
 
 
 # engine dependent config
 # engine dependent config
 categories = ['images']
 categories = ['images']

+ 2 - 1
searx/engines/bing_news.py

@@ -16,7 +16,8 @@ from dateutil import parser
 from urllib.parse import urlencode, urlparse, parse_qsl
 from urllib.parse import urlencode, urlparse, parse_qsl
 from lxml import etree
 from lxml import etree
 from searx.utils import list_get, match_language
 from searx.utils import list_get, match_language
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
+from searx.engines.bing import language_aliases
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA
 
 
 # engine dependent config
 # engine dependent config
 categories = ['news']
 categories = ['news']

+ 2 - 1
searx/engines/bing_videos.py

@@ -15,7 +15,8 @@ from lxml import html
 from urllib.parse import urlencode
 from urllib.parse import urlencode
 from searx.utils import match_language
 from searx.utils import match_language
 
 
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
+from searx.engines.bing import language_aliases
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA
 
 
 categories = ['videos']
 categories = ['videos']
 paging = True
 paging = True

+ 0 - 1
searx/engines/btdigg.py

@@ -11,7 +11,6 @@
 """
 """
 
 
 from lxml import html
 from lxml import html
-from operator import itemgetter
 from urllib.parse import quote, urljoin
 from urllib.parse import quote, urljoin
 from searx.utils import extract_text, get_torrent_size
 from searx.utils import extract_text, get_torrent_size
 
 

+ 0 - 1
searx/engines/command.py

@@ -18,7 +18,6 @@ import re
 from os.path import expanduser, isabs, realpath, commonprefix
 from os.path import expanduser, isabs, realpath, commonprefix
 from shlex import split as shlex_split
 from shlex import split as shlex_split
 from subprocess import Popen, PIPE
 from subprocess import Popen, PIPE
-from time import time
 from threading import Thread
 from threading import Thread
 
 
 from searx import logger
 from searx import logger

+ 1 - 4
searx/engines/currency_convert.py

@@ -1,10 +1,7 @@
 import json
 import json
 import re
 import re
 import unicodedata
 import unicodedata
-
-from datetime import datetime
-
-from searx.data import CURRENCIES
+from searx.data import CURRENCIES  # NOQA
 
 
 
 
 categories = []
 categories = []

+ 0 - 1
searx/engines/deviantart.py

@@ -15,7 +15,6 @@
 from lxml import html
 from lxml import html
 import re
 import re
 from urllib.parse import urlencode
 from urllib.parse import urlencode
-from searx.utils import extract_text
 
 
 
 
 # engine dependent config
 # engine dependent config

+ 0 - 2
searx/engines/digg.py

@@ -12,10 +12,8 @@
 
 
 import random
 import random
 import string
 import string
-from dateutil import parser
 from json import loads
 from json import loads
 from urllib.parse import urlencode
 from urllib.parse import urlencode
-from lxml import html
 from datetime import datetime
 from datetime import datetime
 
 
 # engine dependent config
 # engine dependent config

+ 0 - 2
searx/engines/duckduckgo.py

@@ -15,8 +15,6 @@
 
 
 from lxml.html import fromstring
 from lxml.html import fromstring
 from json import loads
 from json import loads
-from urllib.parse import urlencode
-from searx.poolrequests import get
 from searx.utils import extract_text, match_language, eval_xpath
 from searx.utils import extract_text, match_language, eval_xpath
 
 
 # engine dependent config
 # engine dependent config

+ 2 - 2
searx/engines/duckduckgo_definitions.py

@@ -15,11 +15,11 @@ from lxml import html
 
 
 from searx import logger
 from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.data import WIKIDATA_UNITS
-from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases
+from searx.engines.duckduckgo import language_aliases
+from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA
 from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
 from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
 
 
-
 logger = logger.getChild('duckduckgo_definitions')
 logger = logger.getChild('duckduckgo_definitions')
 
 
 URL = 'https://api.duckduckgo.com/'\
 URL = 'https://api.duckduckgo.com/'\

+ 2 - 5
searx/engines/duckduckgo_images.py

@@ -15,12 +15,9 @@
 
 
 from json import loads
 from json import loads
 from urllib.parse import urlencode
 from urllib.parse import urlencode
-from searx.engines.duckduckgo import (
-    _fetch_supported_languages, supported_languages_url,
-    get_region_code, language_aliases
-)
+from searx.engines.duckduckgo import get_region_code
+from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA
 from searx.poolrequests import get
 from searx.poolrequests import get
-from searx.utils import extract_text
 
 
 # engine dependent config
 # engine dependent config
 categories = ['images']
 categories = ['images']

+ 0 - 1
searx/engines/duden.py

@@ -60,7 +60,6 @@ def response(resp):
 
 
     except:
     except:
         logger.debug("Couldn't read number of results.")
         logger.debug("Couldn't read number of results.")
-        pass
 
 
     for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
     for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
         try:
         try:

+ 0 - 3
searx/engines/elasticsearch.py

@@ -1,8 +1,5 @@
 from json import loads, dumps
 from json import loads, dumps
-from lxml import html
-from urllib.parse import quote, urljoin
 from requests.auth import HTTPBasicAuth
 from requests.auth import HTTPBasicAuth
-from searx.utils import extract_text, get_torrent_size
 
 
 
 
 base_url = 'http://localhost:9200'
 base_url = 'http://localhost:9200'

+ 1 - 4
searx/engines/google_images.py

@@ -29,12 +29,9 @@ from lxml import html
 from flask_babel import gettext
 from flask_babel import gettext
 from searx import logger
 from searx import logger
 from searx.utils import extract_text, eval_xpath
 from searx.utils import extract_text, eval_xpath
+from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA
 
 
 # pylint: disable=unused-import
 # pylint: disable=unused-import
-from searx.engines.google import (
-    supported_languages_url,
-    _fetch_supported_languages,
-)
 # pylint: enable=unused-import
 # pylint: enable=unused-import
 
 
 from searx.engines.google import (
 from searx.engines.google import (

+ 1 - 1
searx/engines/google_news.py

@@ -12,8 +12,8 @@
 
 
 from urllib.parse import urlencode
 from urllib.parse import urlencode
 from lxml import html
 from lxml import html
-from searx.engines.google import _fetch_supported_languages, supported_languages_url
 from searx.utils import match_language
 from searx.utils import match_language
+from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA
 
 
 # search-url
 # search-url
 categories = ['news']
 categories = ['news']

+ 0 - 1
searx/engines/google_videos.py

@@ -11,7 +11,6 @@
 """
 """
 
 
 from datetime import date, timedelta
 from datetime import date, timedelta
-from json import loads
 from urllib.parse import urlencode
 from urllib.parse import urlencode
 from lxml import html
 from lxml import html
 from searx.utils import extract_text
 from searx.utils import extract_text

+ 2 - 2
searx/engines/piratebay.py

@@ -12,8 +12,8 @@ from json import loads
 from datetime import datetime
 from datetime import datetime
 from operator import itemgetter
 from operator import itemgetter
 
 
-from urllib.parse import quote, urljoin
-from searx.utils import extract_text, get_torrent_size
+from urllib.parse import quote
+from searx.utils import get_torrent_size
 
 
 # engine dependent config
 # engine dependent config
 categories = ["videos", "music", "files"]
 categories = ["videos", "music", "files"]

+ 0 - 1
searx/engines/soundcloud.py

@@ -14,7 +14,6 @@ import re
 from json import loads
 from json import loads
 from lxml import html
 from lxml import html
 from dateutil import parser
 from dateutil import parser
-from io import StringIO
 from urllib.parse import quote_plus, urlencode
 from urllib.parse import quote_plus, urlencode
 from searx import logger
 from searx import logger
 from searx.poolrequests import get as http_get
 from searx.poolrequests import get as http_get

+ 0 - 1
searx/engines/startpage.py

@@ -17,7 +17,6 @@ import re
 from unicodedata import normalize, combining
 from unicodedata import normalize, combining
 from babel import Locale
 from babel import Locale
 from babel.localedata import locale_identifiers
 from babel.localedata import locale_identifiers
-from searx.languages import language_codes
 from searx.utils import extract_text, eval_xpath, match_language
 from searx.utils import extract_text, eval_xpath, match_language
 
 
 # engine dependent config
 # engine dependent config

+ 1 - 1
searx/engines/wikidata.py

@@ -21,9 +21,9 @@ from babel.dates import format_datetime, format_date, format_time, get_datetime_
 from searx import logger
 from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.data import WIKIDATA_UNITS
 from searx.poolrequests import post, get
 from searx.poolrequests import post, get
-from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
 from searx.utils import match_language, searx_useragent, get_string_replaces_function
 from searx.utils import match_language, searx_useragent, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
+from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url  # NOQA
 
 
 logger = logger.getChild('wikidata')
 logger = logger.getChild('wikidata')
 
 

+ 2 - 3
searx/engines/yahoo_news.py

@@ -13,9 +13,8 @@ import re
 from datetime import datetime, timedelta
 from datetime import datetime, timedelta
 from urllib.parse import urlencode
 from urllib.parse import urlencode
 from lxml import html
 from lxml import html
-from searx.engines.yahoo import (
-    parse_url, _fetch_supported_languages, supported_languages_url, language_aliases
-)
+from searx.engines.yahoo import parse_url, language_aliases
+from searx.engines.yahoo import _fetch_supported_languages, supported_languages_url  # NOQA
 from dateutil import parser
 from dateutil import parser
 from searx.utils import extract_text, extract_url, match_language
 from searx.utils import extract_text, extract_url, match_language
 
 

+ 0 - 1
searx/engines/youtube_noapi.py

@@ -11,7 +11,6 @@
 from functools import reduce
 from functools import reduce
 from json import loads
 from json import loads
 from urllib.parse import quote_plus
 from urllib.parse import quote_plus
-from searx.utils import extract_text, list_get
 
 
 # engine dependent config
 # engine dependent config
 categories = ['videos', 'music']
 categories = ['videos', 'music']

+ 0 - 1
searx/plugins/__init__.py

@@ -20,7 +20,6 @@ from importlib import import_module
 from os import listdir, makedirs, remove, stat, utime
 from os import listdir, makedirs, remove, stat, utime
 from os.path import abspath, basename, dirname, exists, join
 from os.path import abspath, basename, dirname, exists, join
 from shutil import copyfile
 from shutil import copyfile
-from traceback import print_exc
 
 
 from searx import logger, settings, static_path
 from searx import logger, settings, static_path
 
 

+ 0 - 1
searx/results.py

@@ -1,5 +1,4 @@
 import re
 import re
-from collections import defaultdict
 from operator import itemgetter
 from operator import itemgetter
 from threading import RLock
 from threading import RLock
 from urllib.parse import urlparse, unquote
 from urllib.parse import urlparse, unquote

+ 1 - 1
searx/utils.py

@@ -7,7 +7,7 @@ from numbers import Number
 from os.path import splitext, join
 from os.path import splitext, join
 from random import choice
 from random import choice
 from html.parser import HTMLParser
 from html.parser import HTMLParser
-from urllib.parse import urljoin, urlparse, unquote
+from urllib.parse import urljoin, urlparse
 
 
 from lxml import html
 from lxml import html
 from lxml.etree import XPath, _ElementStringResult, _ElementUnicodeResult
 from lxml.etree import XPath, _ElementStringResult, _ElementUnicodeResult

+ 1 - 1
searx/webapp.py

@@ -40,7 +40,7 @@ from datetime import datetime, timedelta
 from time import time
 from time import time
 from html import escape
 from html import escape
 from io import StringIO
 from io import StringIO
-from urllib.parse import urlencode, urlparse, urljoin, urlsplit
+from urllib.parse import urlencode, urljoin, urlparse
 
 
 from pygments import highlight
 from pygments import highlight
 from pygments.lexers import get_lexer_by_name
 from pygments.lexers import get_lexer_by_name

+ 0 - 1
tests/unit/engines/test_command.py

@@ -14,7 +14,6 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 
 
 '''
 '''
 
 
-from sys import version_info
 
 
 from searx.engines import command as command_engine
 from searx.engines import command as command_engine
 from searx.testing import SearxTestCase
 from searx.testing import SearxTestCase

+ 1 - 1
tests/unit/test_webadapter.py

@@ -5,7 +5,7 @@ from searx.preferences import Preferences
 from searx.engines import engines
 from searx.engines import engines
 
 
 import searx.search
 import searx.search
-from searx.search import EngineRef, SearchQuery
+from searx.search import EngineRef
 from searx.webadapter import validate_engineref_list
 from searx.webadapter import validate_engineref_list