# utils.py
  1. # import htmlentitydefs
  2. import cStringIO
  3. import csv
  4. import os
  5. import re
  6. from babel.dates import format_date
  7. from codecs import getincrementalencoder
  8. from HTMLParser import HTMLParser
  9. from random import choice
  10. from searx.version import VERSION_STRING
  11. from searx import settings
  12. from searx import logger
# Child logger scoped to this module.
logger = logger.getChild('utils')

# Firefox version tokens used to build a randomized desktop user agent.
ua_versions = ('33.0',
               '34.0',
               '35.0',
               '36.0',
               '37.0')

# Platform strings combined with the versions above.
ua_os = ('Windows NT 6.3; WOW64',
         'X11; Linux x86_64',
         'X11; Linux x86')

# User agent template filled in by gen_useragent().
ua = "Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}"

# Tags whose text content HTMLTextExtractor drops entirely.
blocked_tags = ('script',
                'style')
  25. def gen_useragent():
  26. # TODO
  27. return ua.format(os=choice(ua_os), version=choice(ua_versions))
  28. def searx_useragent():
  29. return 'searx/{searx_version} {suffix}'.format(
  30. searx_version=VERSION_STRING,
  31. suffix=settings['outgoing'].get('useragent_suffix', ''))
  32. def highlight_content(content, query):
  33. if not content:
  34. return None
  35. # ignoring html contents
  36. # TODO better html content detection
  37. if content.find('<') != -1:
  38. return content
  39. query = query.decode('utf-8')
  40. if content.lower().find(query.lower()) > -1:
  41. query_regex = u'({0})'.format(re.escape(query))
  42. content = re.sub(query_regex, '<span class="highlight">\\1</span>',
  43. content, flags=re.I | re.U)
  44. else:
  45. regex_parts = []
  46. for chunk in query.split():
  47. if len(chunk) == 1:
  48. regex_parts.append(u'\W+{0}\W+'.format(re.escape(chunk)))
  49. else:
  50. regex_parts.append(u'{0}'.format(re.escape(chunk)))
  51. query_regex = u'({0})'.format('|'.join(regex_parts))
  52. content = re.sub(query_regex, '<span class="highlight">\\1</span>',
  53. content, flags=re.I | re.U)
  54. return content
  55. class HTMLTextExtractor(HTMLParser):
  56. def __init__(self):
  57. HTMLParser.__init__(self)
  58. self.result = []
  59. self.tags = []
  60. def handle_starttag(self, tag, attrs):
  61. self.tags.append(tag)
  62. def handle_endtag(self, tag):
  63. if not self.tags:
  64. return
  65. if tag != self.tags[-1]:
  66. raise Exception("invalid html")
  67. self.tags.pop()
  68. def is_valid_tag(self):
  69. return not self.tags or self.tags[-1] not in blocked_tags
  70. def handle_data(self, d):
  71. if not self.is_valid_tag():
  72. return
  73. self.result.append(d)
  74. def handle_charref(self, number):
  75. if not self.is_valid_tag():
  76. return
  77. if number[0] in (u'x', u'X'):
  78. codepoint = int(number[1:], 16)
  79. else:
  80. codepoint = int(number)
  81. self.result.append(unichr(codepoint))
  82. def handle_entityref(self, name):
  83. if not self.is_valid_tag():
  84. return
  85. # codepoint = htmlentitydefs.name2codepoint[name]
  86. # self.result.append(unichr(codepoint))
  87. self.result.append(name)
  88. def get_text(self):
  89. return u''.join(self.result).strip()
  90. def html_to_text(html):
  91. html = html.replace('\n', ' ')
  92. html = ' '.join(html.split())
  93. s = HTMLTextExtractor()
  94. s.feed(html)
  95. return s.get_text()
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # incremental encoder keeps multi-byte encoder state between rows
        self.encoder = getincrementalencoder(encoding)()

    def writerow(self, row):
        # Encode text cells before handing them to the py2 csv module.
        unicode_row = []
        for col in row:
            if type(col) == str or type(col) == unicode:
                # NOTE(review): on a py2 `str` (bytes) this .encode implicitly
                # decodes as ASCII first and raises for non-ASCII bytes —
                # presumably callers only pass ASCII byte strings or unicode;
                # verify against callers.
                unicode_row.append(col.encode('utf-8').strip())
            else:
                unicode_row.append(col)
        self.writer.writerow(unicode_row)
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        # write each row through writerow so encoding is applied per row
        for row in rows:
            self.writerow(row)
  127. def get_themes(root):
  128. """Returns available themes list."""
  129. static_path = os.path.join(root, 'static')
  130. templates_path = os.path.join(root, 'templates')
  131. themes = os.listdir(os.path.join(static_path, 'themes'))
  132. return static_path, templates_path, themes
  133. def get_static_files(base_path):
  134. base_path = os.path.join(base_path, 'static')
  135. static_files = set()
  136. base_path_length = len(base_path) + 1
  137. for directory, _, files in os.walk(base_path):
  138. for filename in files:
  139. f = os.path.join(directory[base_path_length:], filename)
  140. static_files.add(f)
  141. return static_files
  142. def get_result_templates(base_path):
  143. base_path = os.path.join(base_path, 'templates')
  144. result_templates = set()
  145. base_path_length = len(base_path) + 1
  146. for directory, _, files in os.walk(base_path):
  147. if directory.endswith('result_templates'):
  148. for filename in files:
  149. f = os.path.join(directory[base_path_length:], filename)
  150. result_templates.add(f)
  151. return result_templates
  152. def format_date_by_locale(date, locale_string):
  153. # strftime works only on dates after 1900
  154. if date.year <= 1900:
  155. return date.isoformat().split('T')[0]
  156. if locale_string == 'all':
  157. locale_string = settings['ui']['default_locale'] or 'en_US'
  158. return format_date(date, locale=locale_string)
  159. def dict_subset(d, properties):
  160. result = {}
  161. for k in properties:
  162. if k in d:
  163. result[k] = d[k]
  164. return result
  165. def prettify_url(url, max_length=74):
  166. if len(url) > max_length:
  167. chunk_len = max_length / 2 + 1
  168. return u'{0}[...]{1}'.format(url[:chunk_len], url[-chunk_len:])
  169. else:
  170. return url
  171. # get element in list or default value
  172. def list_get(a_list, index, default=None):
  173. if len(a_list) > index:
  174. return a_list[index]
  175. else:
  176. return default
  177. def get_blocked_engines(engines, cookies):
  178. if 'blocked_engines' not in cookies:
  179. return [(engine_name, category) for engine_name in engines
  180. for category in engines[engine_name].categories if engines[engine_name].disabled]
  181. blocked_engine_strings = cookies.get('blocked_engines', '').split(',')
  182. blocked_engines = []
  183. if not blocked_engine_strings:
  184. return blocked_engines
  185. for engine_string in blocked_engine_strings:
  186. if engine_string.find('__') > -1:
  187. engine, category = engine_string.split('__', 1)
  188. if engine in engines and category in engines[engine].categories:
  189. blocked_engines.append((engine, category))
  190. elif engine_string in engines:
  191. for category in engines[engine_string].categories:
  192. blocked_engines.append((engine_string, category))
  193. return blocked_engines