
[mod] pylint all engines without PYLINT_SEARXNG_DISABLE_OPTION

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Markus Heiser · 1 year ago · parent commit 8205f170ff
100 changed files with 94 additions and 159 deletions
  1. manage (+5 -9)
  2. searx/engines/1337x.py (+3 -2)
  3. searx/engines/9gag.py (+0 -1)
  4. searx/engines/__init__.py (+0 -1)
  5. searx/engines/ahmia.py (+1 -1)
  6. searx/engines/annas_archive.py (+0 -1)
  7. searx/engines/apkmirror.py (+0 -1)
  8. searx/engines/apple_app_store.py (+2 -3)
  9. searx/engines/apple_maps.py (+0 -1)
  10. searx/engines/archlinux.py (+0 -1)
  11. searx/engines/artic.py (+0 -1)
  12. searx/engines/arxiv.py (+5 -4)
  13. searx/engines/ask.py (+0 -1)
  14. searx/engines/bandcamp.py (+0 -1)
  15. searx/engines/base.py (+12 -8)
  16. searx/engines/bilibili.py (+0 -1)
  17. searx/engines/bing.py (+0 -1)
  18. searx/engines/bing_images.py (+0 -1)
  19. searx/engines/bing_news.py (+0 -1)
  20. searx/engines/bing_videos.py (+1 -2)
  21. searx/engines/bpb.py (+0 -1)
  22. searx/engines/brave.py (+0 -1)
  23. searx/engines/bt4g.py (+0 -1)
  24. searx/engines/btdigg.py (+3 -2)
  25. searx/engines/chefkoch.py (+0 -1)
  26. searx/engines/command.py (+4 -3)
  27. searx/engines/core.py (+0 -1)
  28. searx/engines/cppreference.py (+0 -1)
  29. searx/engines/crossref.py (+0 -1)
  30. searx/engines/currency_convert.py (+0 -1)
  31. searx/engines/dailymotion.py (+0 -1)
  32. searx/engines/deepl.py (+0 -1)
  33. searx/engines/deezer.py (+1 -1)
  34. searx/engines/demo_offline.py (+0 -1)
  35. searx/engines/demo_online.py (+0 -1)
  36. searx/engines/destatis.py (+0 -1)
  37. searx/engines/deviantart.py (+0 -1)
  38. searx/engines/dictzone.py (+2 -2)
  39. searx/engines/digbt.py (+2 -2)
  40. searx/engines/docker_hub.py (+0 -1)
  41. searx/engines/doku.py (+5 -5)
  42. searx/engines/duckduckgo.py (+0 -1)
  43. searx/engines/duckduckgo_definitions.py (+0 -1)
  44. searx/engines/duckduckgo_weather.py (+0 -1)
  45. searx/engines/dummy-offline.py (+4 -3)
  46. searx/engines/dummy.py (+4 -4)
  47. searx/engines/ebay.py (+2 -1)
  48. searx/engines/emojipedia.py (+0 -1)
  49. searx/engines/flickr.py (+1 -3)
  50. searx/engines/flickr_noapi.py (+0 -1)
  51. searx/engines/fyyd.py (+0 -1)
  52. searx/engines/genius.py (+0 -1)
  53. searx/engines/gentoo.py (+2 -2)
  54. searx/engines/github.py (+1 -2)
  55. searx/engines/goodreads.py (+0 -1)
  56. searx/engines/google.py (+0 -1)
  57. searx/engines/google_images.py (+0 -1)
  58. searx/engines/google_news.py (+0 -1)
  59. searx/engines/google_play.py (+0 -1)
  60. searx/engines/google_scholar.py (+0 -1)
  61. searx/engines/google_videos.py (+0 -1)
  62. searx/engines/hackernews.py (+0 -1)
  63. searx/engines/imdb.py (+0 -2)
  64. searx/engines/imgur.py (+0 -1)
  65. searx/engines/internet_archive_scholar.py (+0 -1)
  66. searx/engines/invidious.py (+0 -1)
  67. searx/engines/json_engine.py (+26 -16)
  68. searx/engines/kickass.py (+0 -1)
  69. searx/engines/lemmy.py (+0 -1)
  70. searx/engines/lib_rs.py (+0 -1)
  71. searx/engines/lingva.py (+0 -1)
  72. searx/engines/livespace.py (+0 -1)
  73. searx/engines/mastodon.py (+0 -1)
  74. searx/engines/material_icons.py (+1 -1)
  75. searx/engines/mediathekviewweb.py (+0 -1)
  76. searx/engines/mediawiki.py (+0 -1)
  77. searx/engines/meilisearch.py (+0 -1)
  78. searx/engines/metacpan.py (+0 -1)
  79. searx/engines/mixcloud.py (+0 -1)
  80. searx/engines/mongodb.py (+0 -1)
  81. searx/engines/moviepilot.py (+0 -1)
  82. searx/engines/mozhi.py (+0 -1)
  83. searx/engines/mrs.py (+0 -1)
  84. searx/engines/mullvad_leta.py (+0 -1)
  85. searx/engines/mwmbl.py (+0 -1)
  86. searx/engines/mysql_server.py (+0 -1)
  87. searx/engines/npm.py (+0 -1)
  88. searx/engines/nyaa.py (+0 -1)
  89. searx/engines/odysee.py (+0 -1)
  90. searx/engines/opensemantic.py (+3 -3)
  91. searx/engines/openstreetmap.py (+0 -1)
  92. searx/engines/pdbe.py (+1 -1)
  93. searx/engines/peertube.py (+0 -1)
  94. searx/engines/photon.py (+1 -1)
  95. searx/engines/pinterest.py (+0 -1)
  96. searx/engines/piped.py (+0 -1)
  97. searx/engines/piratebay.py (+2 -2)
  98. searx/engines/pixiv.py (+0 -1)
  99. searx/engines/pkg_go_dev.py (+0 -1)
  100. searx/engines/podcastindex.py (+0 -1)
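
Nearly all of the changes below follow a handful of mechanical patterns: the obsolete `# lint: pylint` marker comment is dropped from every engine (all engines are linted now), intentionally kept constructs such as bare `except:` clauses get inline `# pylint: disable=...` annotations, and small idioms are modernized (`dict()` calls become dict literals, `type()` comparisons become `isinstance()`, standard-library imports move ahead of third-party ones). A minimal sketch of reproducing such a lint run from Python, assuming a recent pylint and the checkout root as working directory; the glob here is an assumption, the real file list comes from the `pylint.FILES()` function in `manage`:

```python
# Sketch: lint all engine modules, roughly what `manage` drives via the
# shell. The glob below is an assumption; `manage` builds PYLINT_FILES
# from its pylint.FILES() helper instead.
from pathlib import Path

from pylint.lint import Run

engine_files = sorted(str(p) for p in Path("searx/engines").glob("*.py"))

# exit=False returns control so the aggregated stats can be inspected
run = Run(["--rcfile=.pylintrc", *engine_files], exit=False)
print("global score:", run.linter.stats.global_note)
```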

+ 5 - 9
manage

@@ -64,6 +64,11 @@ pylint.FILES() {
     find . -name searxng.msg
 }
 
+PYLINT_FILES=()
+while IFS= read -r line; do
+   PYLINT_FILES+=("$line")
+done <<< "$(pylint.FILES)"
+
 YAMLLINT_FILES=()
 while IFS= read -r line; do
    YAMLLINT_FILES+=("$line")
@@ -77,9 +82,6 @@ PYLINT_SEARXNG_DISABLE_OPTION="\
 I,C,R,\
 W0105,W0212,W0511,W0603,W0613,W0621,W0702,W0703,W1401,\
 E1136"
-PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES="traits,supported_languages,language_aliases,logger,categories"
-PYLINT_OPTIONS="-m pylint -j 0 --rcfile .pylintrc"
-
 help() {
     nvm.help
     cat <<EOF
@@ -338,12 +340,6 @@ format.python() {
     dump_return $?
 }
 
-
-PYLINT_FILES=()
-while IFS= read -r line; do
-   PYLINT_FILES+=("$line")
-done <<< "$(pylint.FILES)"
-
 # shellcheck disable=SC2119
 main() {
 

+ 3 - 2
searx/engines/1337x.py

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- 1337x
+# pylint: disable=invalid-name
+"""1337x
+
 """
 
 from urllib.parse import quote, urljoin

+ 0 - 1
searx/engines/9gag.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 # pylint: disable=invalid-name
 """9GAG (social media)"""
 

+ 0 - 1
searx/engines/__init__.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Load and initialize the ``engines``, see :py:func:`load_engines` and register
 :py:obj:`engine_shortcuts`.
 

+ 1 - 1
searx/engines/ahmia.py

@@ -74,7 +74,7 @@ def response(resp):
     if number_of_results:
         try:
             results.append({'number_of_results': int(extract_text(number_of_results))})
-        except:
+        except:  # pylint: disable=bare-except
             pass
 
     return results
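
The `ahmia.py` hunk above shows the pattern used for most warnings in this commit: instead of rewriting working code, the bare `except:` is kept and annotated, which silences pylint's `bare-except` (W0702) check. A small self-contained illustration of the two possible fixes, with made-up values:

```python
# Sketch: two ways to satisfy pylint's bare-except check (W0702).
text = "not a number"

# Stricter alternative: catch only the expected failure.
try:
    number_of_results = int(text)
except ValueError:
    number_of_results = 0

# What this commit does: keep the catch-all, silence the checker inline.
try:
    number_of_results = int(text)
except:  # pylint: disable=bare-except
    number_of_results = 0
```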

+ 0 - 1
searx/engines/annas_archive.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """`Anna's Archive`_ is a free non-profit online shadow library metasearch
 engine providing access to a variety of book resources (also via IPFS), created
 by a team of anonymous archivists (AnnaArchivist_).

+ 0 - 1
searx/engines/apkmirror.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """APKMirror
 """
 

+ 2 - 3
searx/engines/apple_app_store.py

@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
-"""
-    Apple App Store
+"""Apple App Store
+
 """
 
 from json import loads

+ 0 - 1
searx/engines/apple_maps.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Apple Maps"""
 
 from json import loads

+ 0 - 1
searx/engines/archlinux.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """
 Arch Linux Wiki
 ~~~~~~~~~~~~~~~

+ 0 - 1
searx/engines/artic.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """The Art Institute of Chicago
 
 Explore thousands of artworks from The Art Institute of Chicago.

+ 5 - 4
searx/engines/arxiv.py

@@ -1,11 +1,12 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- ArXiV (Scientific preprints)
+"""ArXiV (Scientific preprints)
+
 """
 
+from datetime import datetime
+
 from lxml import etree
 from lxml.etree import XPath
-from datetime import datetime
 from searx.utils import eval_xpath, eval_xpath_list, eval_xpath_getindex
 
 # about
@@ -50,7 +51,7 @@ def request(query, params):
     # basic search
     offset = (params['pageno'] - 1) * number_of_results
 
-    string_args = dict(query=query, offset=offset, number_of_results=number_of_results)
+    string_args = {'query': query, 'offset': offset, 'number_of_results': number_of_results}
 
     params['url'] = base_url.format(**string_args)
 

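The `arxiv.py` hunks bundle two convention fixes that recur below: `dict(...)` calls become dict literals (pylint's `use-dict-literal` suggestion) and standard-library imports move above third-party ones (`wrong-import-order`). Both messages fall under the `I,C,R` classes that the dropped PYLINT_SEARXNG_DISABLE_OPTION used to suppress. A tiny equivalence sketch:

```python
# Sketch: a dict display is preferred over a dict() call; both build
# the same mapping, the literal just avoids a name lookup and a call.
offset, number_of_results = 0, 10

as_call = dict(query='q', offset=offset, number_of_results=number_of_results)
as_literal = {'query': 'q', 'offset': offset, 'number_of_results': number_of_results}
assert as_call == as_literal

# wrong-import-order: `from datetime import datetime` (stdlib) now sits
# above `from lxml import etree` (third party), matching PEP 8 grouping.
```
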
+ 0 - 1
searx/engines/ask.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Ask.com"""
 
 from urllib.parse import urlencode

+ 0 - 1
searx/engines/bandcamp.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Bandcamp (Music)
 
 @website     https://bandcamp.com/

+ 12 - 8
searx/engines/base.py

@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- BASE (Scholar publications)
-"""
+"""BASE (Scholar publications)
+
+"""
+from datetime import datetime
+import re
 
 from urllib.parse import urlencode
 from lxml import etree
-from datetime import datetime
-import re
 from searx.utils import searx_useragent
 
 # about
@@ -55,13 +55,17 @@ shorcut_dict = {
 
 def request(query, params):
     # replace shortcuts with API advanced search keywords
-    for key in shorcut_dict.keys():
-        query = re.sub(key, shorcut_dict[key], query)
+    for key, val in shorcut_dict.items():
+        query = re.sub(key, val, query)
 
     # basic search
     offset = (params['pageno'] - 1) * number_of_results
 
-    string_args = dict(query=urlencode({'query': query}), offset=offset, hits=number_of_results)
+    string_args = {
+        'query': urlencode({'query': query}),
+        'offset': offset,
+        'hits': number_of_results,
+    }
 
     params['url'] = base_url.format(**string_args)
 
@@ -99,7 +103,7 @@ def response(resp):
             try:
                 publishedDate = datetime.strptime(date, date_format)
                 break
-            except:
+            except:  # pylint: disable=bare-except
                 pass
 
         if publishedDate is not None:
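
Besides the bare-except annotation, `base.py` replaces a `.keys()` loop with `.items()`, which is what pylint's `consider-using-dict-items` suggestion asks for: it removes one dictionary lookup per iteration. A sketch with hypothetical shortcut patterns (the real `shorcut_dict` contents are not shown in this diff):

```python
# Sketch: iterating items() yields key and value together, so the body
# no longer needs shorcut_dict[key]. The patterns below are made up.
import re

shorcut_dict = {r"\bau\b": "dcauthor:", r"\bti\b": "dctitle:"}
query = "au smith ti neural"

for key, val in shorcut_dict.items():
    query = re.sub(key, val, query)

print(query)  # -> dcauthor: smith dctitle: neural
```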

+ 0 - 1
searx/engines/bilibili.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Bilibili is a Chinese video sharing website.
 
 .. _Bilibili: https://www.bilibili.com

+ 0 - 1
searx/engines/bing.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """This is the implementation of the Bing-WEB engine. Some of this
 implementations are shared by other engines:
 

+ 0 - 1
searx/engines/bing_images.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Bing-Images: description see :py:obj:`searx.engines.bing`.
 """
 # pylint: disable=invalid-name

+ 0 - 1
searx/engines/bing_news.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Bing-News: description see :py:obj:`searx.engines.bing`.
 
 .. hint::

+ 1 - 2
searx/engines/bing_videos.py

@@ -1,8 +1,7 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
+# pylint: disable=invalid-name
 """Bing-Videos: description see :py:obj:`searx.engines.bing`.
 """
-# pylint: disable=invalid-name
 
 from typing import TYPE_CHECKING
 import json

+ 0 - 1
searx/engines/bpb.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """BPB refers to ``Bundeszentrale für poltische Bildung``, which is a German
 governmental institution aiming to reduce misinformation by providing resources
 about politics and history.

+ 0 - 1
searx/engines/brave.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Brave supports the categories listed in :py:obj:`brave_category` (General,
 news, videos, images).  The support of :py:obj:`paging` and :py:obj:`time range
 <time_range_support>` is limited (see remarks).

+ 0 - 1
searx/engines/bt4g.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """BT4G_ (bt4g.com) is not a tracker and doesn't store any content and only
 collects torrent metadata (such as file names and file sizes) and a magnet link
 (torrent identifier).

+ 3 - 2
searx/engines/btdigg.py

@@ -3,8 +3,9 @@
  BTDigg (Videos, Music, Files)
 """
 
-from lxml import html
 from urllib.parse import quote, urljoin
+
+from lxml import html
 from searx.utils import extract_text, get_torrent_size
 
 # about
@@ -67,7 +68,7 @@ def response(resp):
         # convert files to int if possible
         try:
             files = int(files)
-        except:
+        except:  # pylint: disable=bare-except
             files = None
 
         magnetlink = result.xpath('.//div[@class="torrent_magnet"]//a')[0].attrib['href']

+ 0 - 1
searx/engines/chefkoch.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Chefkoch is a German database of recipes.
 """
 

+ 4 - 3
searx/engines/command.py

@@ -106,7 +106,7 @@ def init(engine_settings):
     if 'command' not in engine_settings:
         raise ValueError('engine command : missing configuration key: command')
 
-    global command, working_dir, delimiter, parse_regex, environment_variables
+    global command, working_dir, delimiter, parse_regex, environment_variables  # pylint: disable=global-statement
 
     command = engine_settings['command']
 
@@ -172,7 +172,7 @@ def _get_results_from_process(results, cmd, pageno):
                     _command_logger.debug('skipped result:', raw_result)
                     continue
 
-                if start <= count and count <= end:
+                if start <= count and count <= end:  # pylint: disable=chained-comparison
                     result['template'] = result_template
                     results.append(result)
 
@@ -185,6 +185,7 @@ def _get_results_from_process(results, cmd, pageno):
         return_code = process.wait(timeout=timeout)
         if return_code != 0:
             raise RuntimeError('non-zero return code when running command', cmd, return_code)
+        return None
 
 
 def __get_results_limits(pageno):
@@ -230,7 +231,7 @@ def __parse_single_result(raw_result):
         elements = raw_result.split(delimiter['chars'], maxsplit=len(delimiter['keys']) - 1)
         if len(elements) != len(delimiter['keys']):
             return {}
-        for i in range(len(elements)):
+        for i in range(len(elements)):  # pylint: disable=consider-using-enumerate
             result[delimiter['keys'][i]] = elements[i]
 
     if parse_regex:
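
In `command.py` the warnings are silenced inline rather than refactored away. For reference, the rewrite that pylint's `chained-comparison` message would otherwise suggest looks like this (a sketch with made-up bounds):

```python
# Sketch: pylint's chained-comparison hint. The commit keeps the
# original two-clause test and disables the message inline instead.
start, end = 0, 9

for count in range(20):
    unchained = start <= count and count <= end
    chained = start <= count <= end  # equivalent, evaluates count once
    assert unchained == chained
```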

+ 0 - 1
searx/engines/core.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """CORE (science)
 
 """

+ 0 - 1
searx/engines/cppreference.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Cppreference
 """
 from lxml import html

+ 0 - 1
searx/engines/crossref.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """CrossRef"""
 
 from urllib.parse import urlencode

+ 0 - 1
searx/engines/currency_convert.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Currency convert (DuckDuckGo)
 """
 

+ 0 - 1
searx/engines/dailymotion.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """
 Dailymotion (Videos)
 ~~~~~~~~~~~~~~~~~~~~

+ 0 - 1
searx/engines/deepl.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Deepl translation engine"""
 
 from json import loads

+ 1 - 1
searx/engines/deezer.py

@@ -45,7 +45,7 @@ def response(resp):
     for result in search_res.get('data', []):
         if result['type'] == 'track':
             title = result['title']
-            url = result['link']
+            url = result['link']  # pylint: disable=redefined-outer-name
 
             if url.startswith('http://'):
                 url = 'https' + url[4:]

+ 0 - 1
searx/engines/demo_offline.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Within this module we implement a *demo offline engine*.  Do not look to
 close to the implementation, its just a simple example.  To get in use of this
 *demo* engine add the following entry to your engines list in ``settings.yml``:

+ 0 - 1
searx/engines/demo_online.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Within this module we implement a *demo online engine*.  Do not look to
 close to the implementation, its just a simple example which queries `The Art
 Institute of Chicago <https://www.artic.edu>`_

+ 0 - 1
searx/engines/destatis.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """DeStatis
 """
 

+ 0 - 1
searx/engines/deviantart.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Deviantart (Images)
 
 """

+ 2 - 2
searx/engines/dictzone.py

@@ -26,7 +26,7 @@ results_xpath = './/table[@id="r"]/tr'
 https_support = True
 
 
-def request(query, params):
+def request(query, params):  # pylint: disable=unused-argument
     params['url'] = url.format(from_lang=params['from_lang'][2], to_lang=params['to_lang'][2], query=params['query'])
 
     return params
@@ -40,7 +40,7 @@ def response(resp):
     for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]):
         try:
             from_result, to_results_raw = eval_xpath(result, './td')
-        except:
+        except:  # pylint: disable=bare-except
             continue
 
         to_results = []

+ 2 - 2
searx/engines/digbt.py

@@ -37,9 +37,9 @@ def response(resp):
     search_res = dom.xpath('.//td[@class="x-item"]')
 
     if not search_res:
-        return list()
+        return []
 
-    results = list()
+    results = []
     for result in search_res:
         url = urljoin(URL, result.xpath('.//a[@title]/@href')[0])
         title = extract_text(result.xpath('.//a[@title]'))

+ 0 - 1
searx/engines/docker_hub.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Docker Hub (IT)
 
 """

+ 5 - 5
searx/engines/doku.py

@@ -18,7 +18,7 @@ about = {
 }
 
 # engine dependent config
-categories = ['general']  # TODO , 'images', 'music', 'videos', 'files'
+categories = ['general']  # 'images', 'music', 'videos', 'files'
 paging = False
 number_of_results = 5
 
@@ -31,8 +31,8 @@ search_url = (
     '&{query}'
     # fmt: on
 )
-# TODO  '&startRecord={offset}'
-# TODO  '&maximumRecords={limit}'
+# '&startRecord={offset}'
+# '&maximumRecords={limit}'
 
 
 # do search-request
@@ -54,7 +54,7 @@ def response(resp):
     for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
         try:
             res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
-        except:
+        except:  # pylint: disable=bare-except
             continue
 
         if not res_url:
@@ -76,7 +76,7 @@ def response(resp):
 
                 # append result
                 results.append({'title': title, 'content': content, 'url': base_url + res_url})
-        except:
+        except:  # pylint: disable=bare-except
             continue
 
         if not res_url:

+ 0 - 1
searx/engines/duckduckgo.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """
 DuckDuckGo Lite
 ~~~~~~~~~~~~~~~

+ 0 - 1
searx/engines/duckduckgo_definitions.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """
 DuckDuckGo Instant Answer API
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

+ 0 - 1
searx/engines/duckduckgo_weather.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """
 DuckDuckGo Weather
 ~~~~~~~~~~~~~~~~~~

+ 4 - 3
searx/engines/dummy-offline.py

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- Dummy Offline
+# pylint: disable=invalid-name
+"""Dummy Offline
+
 """
 
 
@@ -14,7 +15,7 @@ about = {
 }
 
 
-def search(query, request_params):
+def search(query, request_params):  # pylint: disable=unused-argument
     return [
         {
             'result': 'this is what you get',

+ 4 - 4
searx/engines/dummy.py

@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- Dummy
+"""Dummy
+
 """
 
 # about
@@ -15,10 +15,10 @@ about = {
 
 
 # do search-request
-def request(query, params):
+def request(query, params):  # pylint: disable=unused-argument
     return params
 
 
 # get response from search-request
-def response(resp):
+def response(resp):  # pylint: disable=unused-argument
     return []
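
The dummy engines also document why the `unused-argument` disables are needed at all: the engine loader calls every online engine with the same fixed signatures, so even a stub that ignores its inputs must accept them. The complete online interface, as mirrored by `dummy.py` above:

```python
# Sketch: the minimal online-engine contract, per dummy.py.

def request(query, params):  # pylint: disable=unused-argument
    # a real engine would fill in params['url'] for the outgoing request
    return params


def response(resp):  # pylint: disable=unused-argument
    # a real engine would parse resp and return a list of result dicts
    return []
```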

+ 2 - 1
searx/engines/ebay.py

@@ -3,9 +3,10 @@
  Ebay (Videos, Music, Files)
 """
 
+from urllib.parse import quote
+
 from lxml import html
 from searx.engines.xpath import extract_text
-from urllib.parse import quote
 
 # about
 about = {

+ 0 - 1
searx/engines/emojipedia.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Emojipedia
 
 Emojipedia is an emoji reference website which documents the meaning and

+ 1 - 3
searx/engines/flickr.py

@@ -78,12 +78,10 @@ def response(resp):
         else:
             thumbnail_src = img_src
 
-        url = build_flickr_url(photo['owner'], photo['id'])
-
         # append result
         results.append(
             {
-                'url': url,
+                'url': build_flickr_url(photo['owner'], photo['id']),
                 'title': photo['title'],
                 'img_src': img_src,
                 'thumbnail_src': thumbnail_src,

+ 0 - 1
searx/engines/flickr_noapi.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Flickr (Images)
 
 """

+ 0 - 1
searx/engines/fyyd.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Fyyd (podcasts)
 """
 

+ 0 - 1
searx/engines/genius.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 # pylint: disable=invalid-name
 """Genius
 

+ 2 - 2
searx/engines/gentoo.py

@@ -107,7 +107,7 @@ def request(query, params):
 def response(resp):
     # get the base URL for the language in which request was made
     language = locale_to_lang_code(resp.search_params['language'])
-    base_url = get_lang_urls(language)['base']
+    url = get_lang_urls(language)['base']
 
     results = []
 
@@ -116,7 +116,7 @@ def response(resp):
     # parse results
     for result in dom.xpath(xpath_results):
         link = result.xpath(xpath_link)[0]
-        href = urljoin(base_url, link.attrib.get('href'))
+        href = urljoin(url, link.attrib.get('href'))
         title = extract_text(link)
         content = extract_text(result.xpath(xpath_content))
 

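The `gentoo.py` rename (`base_url` to `url` inside `response()`) presumably avoids shadowing a module-level `base_url`; with W0621 (`redefined-outer-name`) no longer blanket-disabled, such shadowing would be reported. A sketch of the pattern, with a hypothetical module-level value:

```python
# Sketch: local names that repeat a module-level name trigger W0621.
base_url = "https://wiki.gentoo.org"  # hypothetical module-level default


def build_href(lang_base):
    url = lang_base + "/wiki/Main_Page"  # distinct name: no warning
    return url


print(build_href(base_url))
```
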
+ 1 - 2
searx/engines/github.py

@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
-
 """Github (IT)
+
 """
 
 from urllib.parse import urlencode

+ 0 - 1
searx/engines/goodreads.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Goodreads (books)
 """
 

+ 0 - 1
searx/engines/google.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """This is the implementation of the Google WEB engine.  Some of this
 implementations (manly the :py:obj:`get_google_info`) are shared by other
 engines:

+ 0 - 1
searx/engines/google_images.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """This is the implementation of the Google Images engine using the internal
 Google API used by the Google Go Android app.
 

+ 0 - 1
searx/engines/google_news.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """This is the implementation of the Google News engine.
 
 Google News has a different region handling compared to Google WEB.

+ 0 - 1
searx/engines/google_play.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Google Play Apps & Google Play Movies
 """
 

+ 0 - 1
searx/engines/google_scholar.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """This is the implementation of the Google Scholar engine.
 
 Compared to other Google services the Scholar engine has a simple GET REST-API

+ 0 - 1
searx/engines/google_videos.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """This is the implementation of the Google Videos engine.
 
 .. admonition:: Content-Security-Policy (CSP)

+ 0 - 1
searx/engines/hackernews.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Hackernews
 """
 

+ 0 - 2
searx/engines/imdb.py

@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
-
 """IMDB - Internet Movie Database
 
 Retrieves results from a basic search.  Advanced search options are not

+ 0 - 1
searx/engines/imgur.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Imgur (images)
 """
 

+ 0 - 1
searx/engines/internet_archive_scholar.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Internet Archive scholar(science)
 """
 

+ 0 - 1
searx/engines/invidious.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Invidious (Videos)
 """
 

+ 26 - 16
searx/engines/json_engine.py

@@ -1,4 +1,15 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
+"""The JSON engine is a *generic* engine with which it is possible to configure
+engines in the settings.
+
+.. todo::
+
+   - The JSON engine needs documentation!!
+
+   - The parameters of the JSON engine should be adapted to those of the XPath
+     engine.
+
+"""
 
 from collections.abc import Iterable
 from json import loads
@@ -32,32 +43,31 @@ first_page_num = 1
 
 
 def iterate(iterable):
-    if type(iterable) == dict:
-        it = iterable.items()
+    if isinstance(iterable, dict):
+        items = iterable.items()
 
     else:
-        it = enumerate(iterable)
-    for index, value in it:
+        items = enumerate(iterable)
+    for index, value in items:
         yield str(index), value
 
 
 def is_iterable(obj):
-    if type(obj) == str:
+    if isinstance(obj, str):
         return False
     return isinstance(obj, Iterable)
 
 
-def parse(query):
-    q = []
+def parse(query):  # pylint: disable=redefined-outer-name
+    q = []  # pylint: disable=invalid-name
     for part in query.split('/'):
         if part == '':
             continue
-        else:
-            q.append(part)
+        q.append(part)
     return q
 
 
-def do_query(data, q):
+def do_query(data, q):  # pylint: disable=invalid-name
     ret = []
     if not q:
         return ret
@@ -87,10 +97,10 @@ def query(data, query_string):
     return do_query(data, q)
 
 
-def request(query, params):
+def request(query, params):  # pylint: disable=redefined-outer-name
     query = urlencode({'q': query})[2:]
 
-    fp = {'query': query}
+    fp = {'query': query}  # pylint: disable=invalid-name
     if paging and search_url.find('{pageno}') >= 0:
         fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num
 
@@ -115,18 +125,18 @@ def response(resp):
     content_filter = html_to_text if content_html_to_text else identity
 
     if results_query:
-        rs = query(json, results_query)
-        if not len(rs):
+        rs = query(json, results_query)  # pylint: disable=invalid-name
+        if not rs:
             return results
         for result in rs[0]:
             try:
                 url = query(result, url_query)[0]
                 title = query(result, title_query)[0]
-            except:
+            except:  # pylint: disable=bare-except
                 continue
             try:
                 content = query(result, content_query)[0]
-            except:
+            except:  # pylint: disable=bare-except
                 content = ""
             results.append(
                 {
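
The `type(...) == dict` to `isinstance(...)` change in `json_engine.py` is pylint's `unidiomatic-typecheck` fix, and it is also behaviorally safer: `isinstance` accepts subclasses, while an exact `type` comparison does not.

```python
# Sketch: isinstance() vs. an exact type comparison.
from collections import OrderedDict

data = OrderedDict(a=1)

print(type(data) == dict)      # False: OrderedDict is not exactly dict
print(isinstance(data, dict))  # True: it is a dict subclass
```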

+ 0 - 1
searx/engines/kickass.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Kickass Torrent (Videos, Music, Files)"""
 
 import random

+ 0 - 1
searx/engines/lemmy.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """This engine uses the Lemmy API (https://lemmy.ml/api/v3/search), which is
 documented at `lemmy-js-client`_ / `Interface Search`_.  Since Lemmy is
 federated, results are from many different, independent lemmy instances, and not

+ 0 - 1
searx/engines/lib_rs.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """lib.rs (packages)"""
 
 from urllib.parse import quote_plus

+ 0 - 1
searx/engines/lingva.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Lingva (alternative Google Translate frontend)"""
 
 from json import loads

+ 0 - 1
searx/engines/livespace.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """LiveSpace (Videos)
 
 .. hint::

+ 0 - 1
searx/engines/mastodon.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Mastodon_ is an open source alternative to large social media platforms like
 Twitter/X, Facebook, ...
 

+ 1 - 1
searx/engines/material_icons.py

@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Material Icons (images)
+
 """
 
 import re

+ 0 - 1
searx/engines/mediathekviewweb.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """MediathekViewWeb (API)
 
 """

+ 0 - 1
searx/engines/mediawiki.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """The MediaWiki engine is a *generic* engine to **query** Wikimedia wikis by
 the `MediaWiki Action API`_.  For a `query action`_ all Wikimedia wikis have
 endpoints that follow this pattern::

+ 0 - 1
searx/engines/meilisearch.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """.. sidebar:: info
 
    - :origin:`meilisearch.py <searx/engines/meilisearch.py>`

+ 0 - 1
searx/engines/metacpan.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """metacpan
 """
 

+ 0 - 1
searx/engines/mixcloud.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Mixcloud (Music)
 
 """

+ 0 - 1
searx/engines/mongodb.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """MongoDB_ is a document based database program that handles JSON like data.
 Before configuring the ``mongodb`` engine, you must install the dependency
 pymongo_.

+ 0 - 1
searx/engines/moviepilot.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Moviepilot is a German movie database, similar to IMDB or TMDB.  It doesn't
 have any official API, but it uses JSON requests internally to fetch search
 results and suggestions, that's being used in this implementation.

+ 0 - 1
searx/engines/mozhi.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Mozhi (alternative frontend for popular translation engines)"""
 
 import random

+ 0 - 1
searx/engines/mrs.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Matrix Rooms Search - a fully-featured, standalone, matrix rooms search service.
 
 Configuration

+ 0 - 1
searx/engines/mullvad_leta.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 
 """This is the implementation of the Mullvad-Leta meta-search engine.
 

+ 0 - 1
searx/engines/mwmbl.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Mwmbl_ is a non-profit, ad-free, free-libre and free-lunch search engine with
 a focus on useability and speed.
 

+ 0 - 1
searx/engines/mysql_server.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """MySQL is said to be the most popular open source database.  Before enabling
 MySQL engine, you must install the package ``mysql-connector-python``.
 

+ 0 - 1
searx/engines/npm.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """npms.io
 
 """

+ 0 - 1
searx/engines/nyaa.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Nyaa.si (Anime Bittorrent tracker)
 
 """

+ 0 - 1
searx/engines/odysee.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Odysee_ is a decentralized video hosting platform.
 
 .. _Odysee: https://github.com/OdyseeTeam/odysee-frontend

+ 3 - 3
searx/engines/opensemantic.py

@@ -1,11 +1,11 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- Open Semantic Search
+"""Open Semantic Search
+
 """
 
-from dateutil import parser
 from json import loads
 from urllib.parse import quote
+from dateutil import parser
 
 # about
 about = {

+ 0 - 1
searx/engines/openstreetmap.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """OpenStreetMap (Map)
 
 """

+ 1 - 1
searx/engines/pdbe.py

@@ -96,7 +96,7 @@ def response(resp):
             title = gettext('{title} (OBSOLETE)').format(title=result['title'])
             try:
                 superseded_url = pdbe_entry_url.format(pdb_id=result['superseded_by'])
-            except:
+            except:  # pylint: disable=bare-except
                 continue
 
             # since we can't construct a proper body from the response, we'll make up our own

+ 0 - 1
searx/engines/peertube.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Peertube and :py:obj:`SepiaSearch <searx.engines.sepiasearch>` do share
 (more or less) the same REST API and the schema of the JSON result is identical.
 

+ 1 - 1
searx/engines/photon.py

@@ -87,7 +87,7 @@ def response(resp):
                 properties.get('extent')[2],
             ]
         else:
-            # TODO: better boundingbox calculation
+            # better boundingbox calculation?
             boundingbox = [
                 geojson['coordinates'][1],
                 geojson['coordinates'][1],

+ 0 - 1
searx/engines/pinterest.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Pinterest (images)
 """
 

+ 0 - 1
searx/engines/piped.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """An alternative privacy-friendly YouTube frontend which is efficient by
 design.  `Piped’s architecture`_ consists of 3 components:
 

+ 2 - 2
searx/engines/piratebay.py

@@ -82,14 +82,14 @@ def response(resp):
         try:
             date = datetime.fromtimestamp(float(result["added"]))
             params['publishedDate'] = date
-        except:
+        except:  # pylint: disable=bare-except
             pass
 
         # let's try to calculate the torrent size
         try:
             filesize = get_torrent_size(result["size"], "B")
             params['filesize'] = filesize
-        except:
+        except:  # pylint: disable=bare-except
             pass
 
         # append result

+ 0 - 1
searx/engines/pixiv.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Pixiv (images)"""
 
 from urllib.parse import urlencode

+ 0 - 1
searx/engines/pkg_go_dev.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """pkg.go.dev (packages)"""
 
 import re

+ 0 - 1
searx/engines/podcastindex.py

@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
 """Podcast Index
 """
 

Some files were not shown because too many files changed in this diff