
[enh] removing result html tags

asciimoo committed 11 years ago
commit 17bf00ee42
4 changed files with 7 additions and 6 deletions
  1. searx/engines/duckduckgo.py (+2 -1)
  2. searx/engines/startpage.py (+2 -2)
  3. searx/engines/twitter.py (+2 -1)
  4. searx/engines/xpath.py (+1 -2)

searx/engines/duckduckgo.py (+2 -1)

@@ -1,5 +1,6 @@
 from json import loads
 from urllib import urlencode
+from searx.utils import html_to_text
 
 url = 'https://duckduckgo.com/'
 search_url = url + 'd.js?{query}&l=us-en&p=1&s=0'
@@ -16,7 +17,7 @@ def response(resp):
         if not r.get('t'):
             continue
         results.append({'title': r['t']
-                       ,'content': r['a']
+                       ,'content': html_to_text(r['a'])
                        ,'url': r['u']
                        })
     return results
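
The duckduckgo change runs each raw DDG abstract through searx.utils.html_to_text before storing it as the result content. A minimal sketch of the intended effect, assuming html_to_text strips tags and unescapes entities; the sample snippet is made up:

    from searx.utils import html_to_text

    raw = 'A <b>community</b>-driven metasearch engine &amp; proxy'  # hypothetical DDG abstract
    content = html_to_text(raw)
    # expected: 'A community-driven metasearch engine & proxy' -- no tags left in the result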

searx/engines/startpage.py (+2 -2)

@@ -1,4 +1,4 @@
-from urllib import quote
+from urllib import urlencode
 from lxml import html
 from urlparse import urlparse
 from cgi import escape
@@ -8,7 +8,7 @@ search_url = base_url+'do/search'
 
 def request(query, params):
     global search_url
-    query = quote(query.replace(' ', '+'), safe='+')
+    query = urlencode({'q': query})[2:]
     params['url'] = search_url
     params['method'] = 'POST'
     params['data'] = {'query': query}
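
The startpage change swaps quote for urlencode: urlencode({'q': query}) encodes the query the same way a form submission would (spaces become '+', special characters are percent-encoded), and the [2:] slice drops the leading 'q=' so only the encoded value is posted. A quick sketch with a made-up query:

    from urllib import urlencode

    query = 'internet privacy & freedom'       # hypothetical query
    print(urlencode({'q': query}))             # 'q=internet+privacy+%26+freedom'
    print(urlencode({'q': query})[2:])         # 'internet+privacy+%26+freedom'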

searx/engines/twitter.py (+2 -1)

@@ -1,6 +1,7 @@
 from urlparse import urljoin
 from urllib import urlencode
 from lxml import html
+from cgi import escape
 
 categories = ['social media']
 
@@ -21,6 +22,6 @@ def response(resp):
         link = tweet.xpath('.//small[@class="time"]//a')[0]
         url = urljoin(base_url, link.attrib.get('href'))
         title = ''.join(tweet.xpath('.//span[@class="username js-action-profile-name"]//text()'))
-        content = ''.join(map(html.tostring, tweet.xpath('.//p[@class="js-tweet-text tweet-text"]//*')))
+        content = escape(''.join(tweet.xpath('.//p[@class="js-tweet-text tweet-text"]//text()')))
         results.append({'url': url, 'title': title, 'content': content})
     return results
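
The twitter change stops serialising the tweet's child elements with html.tostring, which kept their markup, and instead joins the text nodes and escapes them. A self-contained sketch on a simplified, made-up tweet fragment:

    from lxml import html
    from cgi import escape

    tweet = html.fromstring('<p class="tweet-text">I <b>love</b> searx &amp; privacy</p>')

    # old: serialised descendant elements -- markup leaks through, leading text is lost
    old = ''.join(map(html.tostring, tweet.xpath('.//*')))   # '<b>love</b> searx &amp; privacy'
    # new: joined text nodes, escaped for safe display
    new = escape(''.join(tweet.xpath('.//text()')))          # 'I love searx &amp; privacy'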

searx/engines/xpath.py (+1 -2)

@@ -46,12 +46,11 @@ def request(query, params):
 def response(resp):
     results = []
     dom = html.fromstring(resp.text)
-    query = resp.search_params['query']
     if results_xpath:
         for result in dom.xpath(results_xpath):
             url = extract_url(result.xpath(url_xpath))
             title = ' '.join(result.xpath(title_xpath))
-            content = escape(' '.join(result.xpath(content_xpath))).replace(query, '<b>{0}</b>'.format(query))
+            content = escape(' '.join(result.xpath(content_xpath)))
             results.append({'url': url, 'title': title, 'content': content})
     else:
         for content, url, title in zip(dom.xpath(content_xpath), map(extract_url, dom.xpath(url_xpath)), dom.xpath(title_xpath)):
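
The xpath change drops the query-highlighting step: the old code injected <b> tags back into content that had just been escaped, which works against the goal of returning tag-free results. A small sketch of the difference, using a made-up query and snippet:

    from cgi import escape

    query = 'privacy'                    # hypothetical search query
    content = 'Search privacy matters'   # hypothetical result snippet

    old = escape(content).replace(query, '<b>{0}</b>'.format(query))
    new = escape(content)
    # old == 'Search <b>privacy</b> matters'  (raw markup re-inserted)
    # new == 'Search privacy matters'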