Browse Source

update versions.cfg to use the current up-to-date packages

Alexandre Flament 10 years ago
parent
commit
4689fe341c

+ 1 - 1
searx/autocomplete.py

@@ -28,7 +28,7 @@ from searx.poolrequests import get as http_get
 
 
 
 
 def get(*args, **kwargs):
 def get(*args, **kwargs):
-    if not 'timeout' in kwargs:
+    if 'timeout' not in kwargs:
         kwargs['timeout'] = settings['server']['request_timeout']
         kwargs['timeout'] = settings['server']['request_timeout']
 
 
     return http_get(*args, **kwargs)
     return http_get(*args, **kwargs)

+ 2 - 2
searx/engines/__init__.py

@@ -86,7 +86,7 @@ def load_engine(engine_data):
             continue
             continue
         if getattr(engine, engine_attr) is None:
         if getattr(engine, engine_attr) is None:
             logger.error('Missing engine config attribute: "{0}.{1}"'
             logger.error('Missing engine config attribute: "{0}.{1}"'
-                  .format(engine.name, engine_attr))
+                         .format(engine.name, engine_attr))
             sys.exit(1)
             sys.exit(1)
 
 
     engine.stats = {
     engine.stats = {
@@ -106,7 +106,7 @@ def load_engine(engine_data):
     if engine.shortcut:
     if engine.shortcut:
         if engine.shortcut in engine_shortcuts:
         if engine.shortcut in engine_shortcuts:
             logger.error('Engine config error: ambigious shortcut: {0}'
             logger.error('Engine config error: ambigious shortcut: {0}'
-                  .format(engine.shortcut))
+                         .format(engine.shortcut))
             sys.exit(1)
             sys.exit(1)
         engine_shortcuts[engine.shortcut] = engine.name
         engine_shortcuts[engine.shortcut] = engine.name
     return engine
     return engine

+ 14 - 12
searx/engines/bing.py

@@ -1,15 +1,17 @@
-## Bing (Web)
-#
-# @website     https://www.bing.com
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        publishedDate
+"""
+ Bing (Web)
+
+ @website     https://www.bing.com
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        publishedDate
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from cgi import escape
 from cgi import escape

+ 17 - 15
searx/engines/bing_images.py

@@ -1,17 +1,19 @@
-## Bing (Images)
-#
-# @website     https://www.bing.com/images
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, img_src
-#
-# @todo        currently there are up to 35 images receive per page,
-#              because bing does not parse count=10.
-#              limited response to 10 images
+"""
+ Bing (Images)
+
+ @website     https://www.bing.com/images
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, img_src
+
 + @todo        currently there are up to 35 images received per page,
+              because bing does not parse count=10.
+              limited response to 10 images
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from lxml import html
 from lxml import html
@@ -76,7 +78,7 @@ def response(resp):
         title = link.attrib.get('t1')
         title = link.attrib.get('t1')
         ihk = link.attrib.get('ihk')
         ihk = link.attrib.get('ihk')
 
 
-        #url = 'http://' + link.attrib.get('t3')
+        # url = 'http://' + link.attrib.get('t3')
         url = yaml_data.get('surl')
         url = yaml_data.get('surl')
         img_src = yaml_data.get('imgurl')
         img_src = yaml_data.get('imgurl')
 
 

+ 14 - 10
searx/engines/bing_news.py

@@ -1,13 +1,15 @@
-## Bing (News)
-#
-# @website     https://www.bing.com/news
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, publishedDate
+"""
+ Bing (News)
+
+ @website     https://www.bing.com/news
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, publishedDate
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from cgi import escape
 from cgi import escape
@@ -87,6 +89,8 @@ def response(resp):
                 publishedDate = parser.parse(publishedDate, dayfirst=False)
                 publishedDate = parser.parse(publishedDate, dayfirst=False)
             except TypeError:
             except TypeError:
                 publishedDate = datetime.now()
                 publishedDate = datetime.now()
+            except ValueError:
+                publishedDate = datetime.now()
 
 
         # append result
         # append result
         results.append({'url': url,
         results.append({'url': url,

+ 11 - 9
searx/engines/blekko_images.py

@@ -1,12 +1,14 @@
-## Blekko (Images)
-#
-# @website     https://blekko.com
-# @provide-api yes (inofficial)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, img_src
+"""
+ Blekko (Images)
+
+ @website     https://blekko.com
 + @provide-api yes (unofficial)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, img_src
+"""
 
 
 from json import loads
 from json import loads
 from urllib import urlencode
 from urllib import urlencode

+ 11 - 9
searx/engines/btdigg.py

@@ -1,12 +1,14 @@
-## BTDigg (Videos, Music, Files)
-#
-# @website     https://btdigg.org
-# @provide-api yes (on demand)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, seed, leech, magnetlink
+"""
+ BTDigg (Videos, Music, Files)
+
+ @website     https://btdigg.org
+ @provide-api yes (on demand)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, seed, leech, magnetlink
+"""
 
 
 from urlparse import urljoin
 from urlparse import urljoin
 from cgi import escape
 from cgi import escape

+ 14 - 12
searx/engines/dailymotion.py

@@ -1,14 +1,16 @@
-## Dailymotion (Videos)
-#
-# @website     https://www.dailymotion.com
-# @provide-api yes (http://www.dailymotion.com/developer)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, thumbnail, publishedDate, embedded
-#
-# @todo        set content-parameter with correct data
+"""
+ Dailymotion (Videos)
+
+ @website     https://www.dailymotion.com
+ @provide-api yes (http://www.dailymotion.com/developer)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, thumbnail, publishedDate, embedded
+
+ @todo        set content-parameter with correct data
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads
@@ -48,7 +50,7 @@ def response(resp):
     search_res = loads(resp.text)
     search_res = loads(resp.text)
 
 
     # return empty array if there are no results
     # return empty array if there are no results
-    if not 'list' in search_res:
+    if 'list' not in search_res:
         return []
         return []
 
 
     # parse results
     # parse results

+ 11 - 9
searx/engines/deezer.py

@@ -1,12 +1,14 @@
-## Deezer (Music)
-#
-# @website     https://deezer.com
-# @provide-api yes (http://developers.deezer.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, embedded
+"""
+ Deezer (Music)
+
+ @website     https://deezer.com
+ @provide-api yes (http://developers.deezer.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded
+"""
 
 
 from json import loads
 from json import loads
 from urllib import urlencode
 from urllib import urlencode

+ 13 - 11
searx/engines/deviantart.py

@@ -1,14 +1,16 @@
-## Deviantart (Images)
-#
-# @website     https://www.deviantart.com/
-# @provide-api yes (https://www.deviantart.com/developers/) (RSS)
-#
-# @using-api   no (TODO, rewrite to api)
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail_src, img_src
-#
-# @todo        rewrite to api
+"""
+ Deviantart (Images)
+
+ @website     https://www.deviantart.com/
+ @provide-api yes (https://www.deviantart.com/developers/) (RSS)
+
+ @using-api   no (TODO, rewrite to api)
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail_src, img_src
+
+ @todo        rewrite to api
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from urlparse import urljoin
 from urlparse import urljoin

+ 11 - 9
searx/engines/digg.py

@@ -1,12 +1,14 @@
-## Digg (News, Social media)
-#
-# @website     https://digg.com/
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, publishedDate, thumbnail
+"""
+ Digg (News, Social media)
+
+ @website     https://digg.com/
+ @provide-api no
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, publishedDate, thumbnail
+"""
 
 
 from urllib import quote_plus
 from urllib import quote_plus
 from json import loads
 from json import loads

+ 16 - 14
searx/engines/duckduckgo.py

@@ -1,17 +1,19 @@
-## DuckDuckGo (Web)
-#
-# @website     https://duckduckgo.com/
-# @provide-api yes (https://duckduckgo.com/api),
-#              but not all results from search-site
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        rewrite to api
-# @todo        language support
-#              (the current used site does not support language-change)
+"""
+ DuckDuckGo (Web)
+
+ @website     https://duckduckgo.com/
+ @provide-api yes (https://duckduckgo.com/api),
+              but not all results from search-site
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        rewrite to api
+ @todo        language support
+              (the current used site does not support language-change)
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from lxml.html import fromstring
 from lxml.html import fromstring

+ 6 - 4
searx/engines/dummy.py

@@ -1,7 +1,9 @@
-## Dummy
-#
-# @results     empty array
-# @stable      yes
+"""
+ Dummy
+
+ @results     empty array
+ @stable      yes
+"""
 
 
 
 
 # do search-request
 # do search-request

+ 11 - 9
searx/engines/faroo.py

@@ -1,12 +1,14 @@
-## Faroo (Web, News)
-#
-# @website     http://www.faroo.com
-# @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, publishedDate, img_src
+"""
+ Faroo (Web, News)
+
+ @website     http://www.faroo.com
+ @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, publishedDate, img_src
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads

+ 14 - 12
searx/engines/flickr.py

@@ -1,15 +1,17 @@
 #!/usr/bin/env python
 #!/usr/bin/env python
 
 
-## Flickr (Images)
-#
-# @website     https://www.flickr.com
-# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, thumbnail, img_src
-#More info on api-key : https://www.flickr.com/services/apps/create/
+"""
+ Flickr (Images)
+
+ @website     https://www.flickr.com
+ @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, thumbnail, img_src
+ More info on api-key : https://www.flickr.com/services/apps/create/
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads
@@ -48,10 +50,10 @@ def response(resp):
     search_results = loads(resp.text)
     search_results = loads(resp.text)
 
 
     # return empty array if there are no results
     # return empty array if there are no results
-    if not 'photos' in search_results:
+    if 'photos' not in search_results:
         return []
         return []
 
 
-    if not 'photo' in search_results['photos']:
+    if 'photo' not in search_results['photos']:
         return []
         return []
 
 
     photos = search_results['photos']['photo']
     photos = search_results['photos']['photo']

+ 13 - 11
searx/engines/flickr_noapi.py

@@ -1,14 +1,16 @@
 #!/usr/bin/env python
 #!/usr/bin/env python
 
 
-#  Flickr (Images)
-#
-# @website     https://www.flickr.com
-# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no
-# @parse       url, title, thumbnail, img_src
+"""
+  Flickr (Images)
+
+ @website     https://www.flickr.com
+ @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+
+ @using-api   no
+ @results     HTML
+ @stable      no
+ @parse       url, title, thumbnail, img_src
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads
@@ -20,8 +22,8 @@ logger = logger.getChild('flickr-noapi')
 
 
 categories = ['images']
 categories = ['images']
 
 
-url = 'https://secure.flickr.com/'
-search_url = url + 'search/?{query}&page={page}'
+url = 'https://www.flickr.com/'
+search_url = url + 'search?{query}&page={page}'
 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
 regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
 regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')

+ 13 - 11
searx/engines/generalfile.py

@@ -1,14 +1,16 @@
-## General Files (Files)
-#
-# @website     http://www.general-files.org
-# @provide-api no (nothing found)
-#
-# @using-api   no (because nothing found)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        detect torrents?
+"""
+ General Files (Files)
+
+ @website     http://www.general-files.org
+ @provide-api no (nothing found)
+
+ @using-api   no (because nothing found)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        detect torrents?
+"""
 
 
 from lxml import html
 from lxml import html
 
 

+ 11 - 9
searx/engines/gigablast.py

@@ -1,12 +1,14 @@
-## Gigablast (Web)
-#
-# @website     http://gigablast.com
-# @provide-api yes (http://gigablast.com/api.html)
-#
-# @using-api   yes
-# @results     XML
-# @stable      yes
-# @parse       url, title, content
+"""
+ Gigablast (Web)
+
+ @website     http://gigablast.com
+ @provide-api yes (http://gigablast.com/api.html)
+
+ @using-api   yes
+ @results     XML
+ @stable      yes
+ @parse       url, title, content
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from cgi import escape
 from cgi import escape

+ 12 - 10
searx/engines/github.py

@@ -1,12 +1,14 @@
-## Github (It)
-#
-# @website     https://github.com/
-# @provide-api yes (https://developer.github.com/v3/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (using api)
-# @parse       url, title, content
+"""
+ Github (It)
+
+ @website     https://github.com/
+ @provide-api yes (https://developer.github.com/v3/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (using api)
+ @parse       url, title, content
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads
@@ -37,7 +39,7 @@ def response(resp):
     search_res = loads(resp.text)
     search_res = loads(resp.text)
 
 
     # check if items are received
     # check if items are received
-    if not 'items' in search_res:
+    if 'items' not in search_res:
         return []
         return []
 
 
     # parse results
     # parse results

+ 12 - 10
searx/engines/google_images.py

@@ -1,13 +1,15 @@
-## Google (Images)
-#
-# @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/),
-#              deprecated!
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (but deprecated)
-# @parse       url, title, img_src
+"""
+ Google (Images)
+
+ @website     https://www.google.com
+ @provide-api yes (https://developers.google.com/web-search/docs/),
+              deprecated!
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (but deprecated)
+ @parse       url, title, img_src
+"""
 
 
 from urllib import urlencode, unquote
 from urllib import urlencode, unquote
 from json import loads
 from json import loads

+ 12 - 10
searx/engines/google_news.py

@@ -1,13 +1,15 @@
-## Google (News)
-#
-# @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/),
-#              deprecated!
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (but deprecated)
-# @parse       url, title, content, publishedDate
+"""
+ Google (News)
+
+ @website     https://www.google.com
+ @provide-api yes (https://developers.google.com/web-search/docs/),
+              deprecated!
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (but deprecated)
+ @parse       url, title, content, publishedDate
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads

+ 1 - 1
searx/engines/json_engine.py

@@ -6,7 +6,7 @@ search_url = None
 url_query = None
 url_query = None
 content_query = None
 content_query = None
 title_query = None
 title_query = None
-#suggestion_xpath = ''
+# suggestion_xpath = ''
 
 
 
 
 def iterate(iterable):
 def iterate(iterable):

+ 11 - 9
searx/engines/kickass.py

@@ -1,12 +1,14 @@
-## Kickass Torrent (Videos, Music, Files)
-#
-# @website     https://kickass.so
-# @provide-api no (nothing found)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      yes (HTML can change)
-# @parse       url, title, content, seed, leech, magnetlink
+"""
+ Kickass Torrent (Videos, Music, Files)
+
+ @website     https://kickass.so
+ @provide-api no (nothing found)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      yes (HTML can change)
+ @parse       url, title, content, seed, leech, magnetlink
+"""
 
 
 from urlparse import urljoin
 from urlparse import urljoin
 from cgi import escape
 from cgi import escape

+ 13 - 11
searx/engines/mediawiki.py

@@ -1,14 +1,16 @@
-## general mediawiki-engine (Web)
-#
-# @website     websites built on mediawiki (https://www.mediawiki.org)
-# @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
-#
-# @todo        content
+"""
+ general mediawiki-engine (Web)
+
+ @website     websites built on mediawiki (https://www.mediawiki.org)
+ @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+
+ @todo        content
+"""
 
 
 from json import loads
 from json import loads
 from string import Formatter
 from string import Formatter

+ 11 - 9
searx/engines/mixcloud.py

@@ -1,12 +1,14 @@
-## Mixcloud (Music)
-#
-# @website     https://http://www.mixcloud.com/
-# @provide-api yes (http://www.mixcloud.com/developers/
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, embedded, publishedDate
+"""
+ Mixcloud (Music)
+
 + @website     https://www.mixcloud.com/
 + @provide-api yes (http://www.mixcloud.com/developers/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded, publishedDate
+"""
 
 
 from json import loads
 from json import loads
 from urllib import urlencode
 from urllib import urlencode

+ 11 - 9
searx/engines/openstreetmap.py

@@ -1,12 +1,14 @@
-## OpenStreetMap (Map)
-#
-# @website     https://openstreetmap.org/
-# @provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
+"""
+ OpenStreetMap (Map)
+
+ @website     https://openstreetmap.org/
+ @provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+"""
 
 
 from json import loads
 from json import loads
 from searx.utils import searx_useragent
 from searx.utils import searx_useragent

+ 11 - 9
searx/engines/photon.py

@@ -1,12 +1,14 @@
-## Photon (Map)
-#
-# @website     https://photon.komoot.de
-# @provide-api yes (https://photon.komoot.de/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
+"""
+ Photon (Map)
+
+ @website     https://photon.komoot.de
+ @provide-api yes (https://photon.komoot.de/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads

+ 11 - 9
searx/engines/searchcode_code.py

@@ -1,12 +1,14 @@
-## Searchcode (It)
-#
-# @website     https://searchcode.com/
-# @provide-api yes (https://searchcode.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content
+"""
+ Searchcode (It)
+
+ @website     https://searchcode.com/
+ @provide-api yes (https://searchcode.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads

+ 11 - 9
searx/engines/searchcode_doc.py

@@ -1,12 +1,14 @@
-## Searchcode (It)
-#
-# @website     https://searchcode.com/
-# @provide-api yes (https://searchcode.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content
+"""
+ Searchcode (It)
+
+ @website     https://searchcode.com/
+ @provide-api yes (https://searchcode.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from json import loads
 from json import loads

+ 11 - 9
searx/engines/soundcloud.py

@@ -1,12 +1,14 @@
-## Soundcloud (Music)
-#
-# @website     https://soundcloud.com
-# @provide-api yes (https://developers.soundcloud.com/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, publishedDate, embedded
+"""
+ Soundcloud (Music)
+
+ @website     https://soundcloud.com
+ @provide-api yes (https://developers.soundcloud.com/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, publishedDate, embedded
+"""
 
 
 from json import loads
 from json import loads
 from urllib import urlencode, quote_plus
 from urllib import urlencode, quote_plus

+ 11 - 9
searx/engines/spotify.py

@@ -1,12 +1,14 @@
-## Spotify (Music)
-#
-# @website     https://spotify.com
-# @provide-api yes (https://developer.spotify.com/web-api/search-item/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, embedded
+"""
+ Spotify (Music)
+
+ @website     https://spotify.com
+ @provide-api yes (https://developer.spotify.com/web-api/search-item/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded
+"""
 
 
 from json import loads
 from json import loads
 from urllib import urlencode
 from urllib import urlencode

+ 11 - 9
searx/engines/stackoverflow.py

@@ -1,12 +1,14 @@
-## Stackoverflow (It)
-#
-# @website     https://stackoverflow.com/
-# @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, content
+"""
+ Stackoverflow (It)
+
+ @website     https://stackoverflow.com/
+ @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, content
+"""
 
 
 from urlparse import urljoin
 from urlparse import urljoin
 from cgi import escape
 from cgi import escape

+ 11 - 9
searx/engines/subtitleseeker.py

@@ -1,12 +1,14 @@
-## Subtitleseeker (Video)
-#
-# @website     http://www.subtitleseeker.com
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, content
+"""
+ Subtitleseeker (Video)
+
+ @website     http://www.subtitleseeker.com
+ @provide-api no
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, content
+"""
 
 
 from cgi import escape
 from cgi import escape
 from urllib import quote_plus
 from urllib import quote_plus

+ 13 - 11
searx/engines/twitter.py

@@ -1,14 +1,16 @@
-## Twitter (Social media)
-#
-# @website     https://twitter.com/
-# @provide-api yes (https://dev.twitter.com/docs/using-search)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        publishedDate
+"""
+ Twitter (Social media)
+
+ @website     https://twitter.com/
+ @provide-api yes (https://dev.twitter.com/docs/using-search)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        publishedDate
+"""
 
 
 from urlparse import urljoin
 from urlparse import urljoin
 from urllib import urlencode
 from urllib import urlencode

+ 10 - 9
searx/engines/www1x.py

@@ -1,13 +1,14 @@
-## 1x (Images)
-#
-# @website     http://1x.com/
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail, img_src, content
+"""
+ 1x (Images)
 
 
+ @website     http://1x.com/
+ @provide-api no
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail, img_src, content
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from urlparse import urljoin
 from urlparse import urljoin

+ 13 - 11
searx/engines/www500px.py

@@ -1,14 +1,16 @@
-## 500px (Images)
-#
-# @website     https://500px.com
-# @provide-api yes (https://developers.500px.com/)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail, img_src, content
-#
-# @todo        rewrite to api
+"""
+ 500px (Images)
+
+ @website     https://500px.com
+ @provide-api yes (https://developers.500px.com/)
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail, img_src, content
+
+ @todo        rewrite to api
+"""
 
 
 
 
 from urllib import urlencode
 from urllib import urlencode

+ 1 - 1
searx/engines/yacy.py

@@ -1,4 +1,4 @@
-## Yacy (Web, Images, Videos, Music, Files)
+# Yacy (Web, Images, Videos, Music, Files)
 #
 #
 # @website     http://yacy.net
 # @website     http://yacy.net
 # @provide-api yes
 # @provide-api yes

+ 12 - 10
searx/engines/yahoo.py

@@ -1,13 +1,15 @@
-## Yahoo (Web)
-#
-# @website     https://search.yahoo.com/web
-# @provide-api yes (https://developer.yahoo.com/boss/search/),
-#              $0.80/1000 queries
-#
-# @using-api   no (because pricing)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, suggestion
+"""
+ Yahoo (Web)
+
+ @website     https://search.yahoo.com/web
+ @provide-api yes (https://developer.yahoo.com/boss/search/),
+              $0.80/1000 queries
+
+ @using-api   no (because pricing)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, suggestion
+"""
 
 
 from urllib import urlencode
 from urllib import urlencode
 from urlparse import unquote
 from urlparse import unquote

+ 2 - 2
searx/engines/youtube.py

@@ -1,4 +1,4 @@
-## Youtube (Videos)
+# Youtube (Videos)
 #
 #
 # @website     https://www.youtube.com/
 # @website     https://www.youtube.com/
 # @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
 # @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
@@ -47,7 +47,7 @@ def response(resp):
     search_results = loads(resp.text)
     search_results = loads(resp.text)
 
 
     # return empty array if there are no results
     # return empty array if there are no results
-    if not 'feed' in search_results:
+    if 'feed' not in search_results:
         return []
         return []
 
 
     feed = search_results['feed']
     feed = search_results['feed']

+ 69 - 50
versions.cfg

@@ -2,96 +2,115 @@
 Babel = 1.3
 Babel = 1.3
 Flask = 0.10.1
 Flask = 0.10.1
 Flask-Babel = 0.9
 Flask-Babel = 0.9
-Jinja2 = 2.7.2
-MarkupSafe = 0.18
-Pygments = 2.0.1
-WebOb = 1.3.1
-WebTest = 2.0.11
-Werkzeug = 0.9.4
+Jinja2 = 2.7.3
+MarkupSafe = 0.23
+Pygments = 2.0.2
+WebOb = 1.4.1
+WebTest = 2.0.18
+Werkzeug = 0.10.4
 buildout-versions = 1.7
 buildout-versions = 1.7
 collective.recipe.omelette = 0.16
 collective.recipe.omelette = 0.16
 coverage = 3.7.1
 coverage = 3.7.1
-decorator = 3.4.0
-docutils = 0.11
-flake8 = 2.1.0
-itsdangerous = 0.23
-mccabe = 0.2.1
+decorator = 3.4.2
+docutils = 0.12
+flake8 = 2.4.0
+itsdangerous = 0.24
+mccabe = 0.3
 mock = 1.0.1
 mock = 1.0.1
-pep8 = 1.4.6
-plone.testing = 4.0.8
-pyflakes = 0.7.3
-pytz = 2013b
-pyyaml = 3.10
-requests = 2.5.3
+pep8 = 1.5.7
+plone.testing = 4.0.13
+pyflakes = 0.8.1
+pytz = 2015.2
+pyyaml = 3.11
+requests = 2.6.2
 robotframework-debuglibrary = 0.3
 robotframework-debuglibrary = 0.3
 robotframework-httplibrary = 0.4.2
 robotframework-httplibrary = 0.4.2
-robotframework-selenium2library = 1.5.0
-robotsuite = 1.4.2
-selenium = 2.39.0
+robotframework-selenium2library = 1.6.0
+robotsuite = 1.6.1
+selenium = 2.45.0
 speaklater = 1.3
 speaklater = 1.3
-unittest2 = 0.5.1
-waitress = 0.8.8
+unittest2 = 1.0.1
+waitress = 0.8.9
 zc.recipe.testrunner = 2.0.0
 zc.recipe.testrunner = 2.0.0
 pyopenssl = 0.15.1
 pyopenssl = 0.15.1
 ndg-httpsclient = 0.3.3
 ndg-httpsclient = 0.3.3
 pyasn1 = 0.1.7
 pyasn1 = 0.1.7
 pyasn1-modules = 0.0.5
 pyasn1-modules = 0.0.5
-certifi = 14.05.14
+certifi = 2015.04.28
+
+# Required by:
+# pyopenssl==0.15.1
+cffi = 0.9.2
+cryptography = 0.8.2
 
 
 # Required by:
 # Required by:
-# WebTest==2.0.11
+# WebTest==2.0.18
 beautifulsoup4 = 4.3.2
 beautifulsoup4 = 4.3.2
 
 
+# Required by:
+# cryptography==0.8.2
+enum34 = 1.0.4
+
 # Required by:
 # Required by:
 # robotframework-httplibrary==0.4.2
 # robotframework-httplibrary==0.4.2
-jsonpatch = 1.3
+jsonpatch = 1.9
 
 
 # Required by:
 # Required by:
 # robotframework-httplibrary==0.4.2
 # robotframework-httplibrary==0.4.2
-jsonpointer = 1.1
+jsonpointer = 1.7
+
+# Required by:
+# traceback2==1.4.0
+linecache2 = 1.0.0
+
+# Required by:
+# robotsuite==1.6.1
+# searx==0.7.0
+lxml = 3.4.4
 
 
 # Required by:
 # Required by:
-# robotsuite==1.4.2
-# searx==0.1
-lxml = 3.2.5
+# cffi==0.9.2
+pycparser = 2.12
+
+# Required by:
+# searx==0.7.0
+python-dateutil = 2.4.2
 
 
 # Required by:
 # Required by:
 # robotframework-httplibrary==0.4.2
 # robotframework-httplibrary==0.4.2
-robotframework = 2.8.3
+robotframework = 2.8.7
+
+# Required by:
+# searx==0.7.0
+# zope.exceptions==4.0.7
+# zope.interface==4.1.2
+# zope.testrunner==4.4.8
+setuptools = 15.2
 
 
 # Required by:
 # Required by:
-# plone.testing==4.0.8
-# robotsuite==1.4.2
-# searx==0.1
-# zope.exceptions==4.0.6
-# zope.interface==4.0.5
-# zope.testrunner==4.4.1
-setuptools = 2.1
+# robotsuite==1.6.1
+# zope.testrunner==4.4.8
+six = 1.9.0
 
 
 # Required by:
 # Required by:
-# zope.testrunner==4.4.1
-six = 1.6.1
+# unittest2==1.0.1
+traceback2 = 1.4.0
 
 
 # Required by:
 # Required by:
 # collective.recipe.omelette==0.16
 # collective.recipe.omelette==0.16
 zc.recipe.egg = 2.0.1
 zc.recipe.egg = 2.0.1
 
 
 # Required by:
 # Required by:
-# zope.testrunner==4.4.1
-zope.exceptions = 4.0.6
+# zope.testrunner==4.4.8
+zope.exceptions = 4.0.7
 
 
 # Required by:
 # Required by:
-# zope.testrunner==4.4.1
-zope.interface = 4.0.5
+# zope.testrunner==4.4.8
+zope.interface = 4.1.2
 
 
 # Required by:
 # Required by:
-# plone.testing==4.0.8
-zope.testing = 4.1.2
+# plone.testing==4.0.13
+zope.testing = 4.1.3
 
 
 # Required by:
 # Required by:
 # zc.recipe.testrunner==2.0.0
 # zc.recipe.testrunner==2.0.0
-zope.testrunner = 4.4.1
-
-# Required by:
-# searx==0.3.0
-python-dateutil = 2.2
+zope.testrunner = 4.4.8