# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Startpage's language & region selectors are a mess ..

.. _startpage regions:

Startpage regions
=================

In the list of regions there are tags we need to map to common region tags::

    pt-BR_BR --> pt_BR
    zh-CN_CN --> zh_Hans_CN
    zh-TW_TW --> zh_Hant_TW
    zh-TW_HK --> zh_Hant_HK
    en-GB_GB --> en_GB

and there is at least one tag with a three letter language tag (ISO 639-2)::

    fil_PH --> fil_PH

The locale code ``no_NO`` from Startpage does not exist and is mapped to
``nb-NO``::

    babel.core.UnknownLocaleError: unknown locale 'no_NO'
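
A quick check in a Python shell reproduces the error (babel is already a
SearXNG dependency; ``sep='_'`` is how this module parses Startpage's tags)::

    >>> import babel
    >>> babel.Locale.parse('no_NO', sep='_')
    Traceback (most recent call last):
      ...
    babel.core.UnknownLocaleError: unknown locale 'no_NO'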

For reference see languages-subtag at iana; ``no`` is the macrolanguage [1]_ and
W3C recommends subtag over macrolanguage [2]_.

.. [1] `iana: language-subtag-registry
   <https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry>`_ ::

       type: language
       Subtag: nb
       Description: Norwegian Bokmål
       Added: 2005-10-16
       Suppress-Script: Latn
       Macrolanguage: no

.. [2]
   Use macrolanguages with care.  Some language subtags have a Scope field set
   to macrolanguage, i.e. this primary language subtag encompasses a number of
   more specific primary language subtags in the registry.  ...  As we
   recommended for the collection subtags mentioned above, in most cases you
   should try to use the more specific subtags ... `W3: The primary language
   subtag
   <https://www.w3.org/International/questions/qa-choosing-language-tags#langsubtag>`_

.. _startpage languages:

Startpage languages
===================

The displayed name in Startpage's settings page depends on the location of the
IP when the 'Accept-Language' HTTP header is unset (in the language update
script we use "en-US,en;q=0.5" to get uniform names independent from the IP).

Each option has a displayed name and a value, either of which may represent the
language name in the native script, the language name in English, an English
transliteration of the native name, the English name of the writing script used
by the language, or occasionally something else entirely.
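
One example: the option value ``fantizhengwen`` is apparently a transliteration
of a native name for Traditional Chinese; it is hard-coded in this module and
mapped to two locales::

    fantizhengwen --> zh-TW, zh-HK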
"""

import re
from time import time
from urllib.parse import urlencode
from unicodedata import normalize, combining
from datetime import datetime, timedelta

from dateutil import parser
from lxml import html
from babel import Locale
from babel.localedata import locale_identifiers

from searx import network
from searx.utils import extract_text, eval_xpath, match_language
from searx.exceptions import (
    SearxEngineResponseException,
    SearxEngineCaptchaException,
)
from searx.enginelib.traits import EngineTraits

traits: EngineTraits

# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
# there is a mechanism to block "bot" search (probably the parameter qid),
# which requires storing of qid's between multiple search-calls
paging = True
supported_languages_url = 'https://www.startpage.com/do/settings'

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'sp/search?'

# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="w-gl__result__main"]'
link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'

# timestamp of the last fetch of 'sc' code
sc_code_ts = 0
sc_code = ''


def raise_captcha(resp):

    if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):
        raise SearxEngineCaptchaException()


def get_sc_code(headers):
    """Get an actual ``sc`` argument from Startpage's home page.

    Startpage puts a ``sc`` argument on every link.  Without this argument
    Startpage considers the request is from a bot.  We do not know what is
    encoded in the value of the ``sc`` argument, but it seems to be a kind of a
    *time-stamp*.  This *time-stamp* is valid for a few hours.

    This function scrapes a new *time-stamp* from startpage's home page every
    3000 seconds (roughly once an hour).
    """

    global sc_code_ts, sc_code  # pylint: disable=global-statement

    if time() > (sc_code_ts + 3000):
        # 'logger' is injected into each engine module by SearXNG's engine loader
        logger.debug("query new sc time-stamp ...")

        resp = network.get(base_url, headers=headers)
        raise_captcha(resp)
        dom = html.fromstring(resp.text)

        try:
            # <input type="hidden" name="sc" value="...">
            sc_code = eval_xpath(dom, '//input[@name="sc"]/@value')[0]
        except IndexError as exc:
            # suspend startpage API --> https://github.com/searxng/searxng/pull/695
            raise SearxEngineResponseException(
                suspended_time=7 * 24 * 3600, message="PR-695: query new sc time-stamp failed!"
            ) from exc

        sc_code_ts = time()
        logger.debug("new value is: %s", sc_code)

    return sc_code


# do search-request
def request(query, params):
    # pylint: disable=line-too-long
    # The format string from Startpage's FFox add-on [1]::
    #
    #     https://www.startpage.com/do/dsearch?query={searchTerms}&cat=web&pl=ext-ff&language=__MSG_extensionUrlLanguage__&extVersion=1.3.0
    #
    # [1] https://addons.mozilla.org/en-US/firefox/addon/startpage-private-search/

    args = {
        'query': query,
        'page': params['pageno'],
        'cat': 'web',
        # 'pl': 'ext-ff',
        # 'extVersion': '1.3.0',
        # 'abp': "-1",
        'sc': get_sc_code(params['headers']),
    }
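
    # With the args above, a page-1 request for the query "test" produces a URL
    # of the form (the sc value here is only a placeholder for the scraped one):
    #
    #     https://startpage.com/sp/search?query=test&page=1&cat=web&sc=...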

    # set language if specified
    if params['language'] != 'all':
        # 'supported_languages' is injected into the module by SearXNG's engine
        # loader (see _fetch_supported_languages below)
        lang_code = match_language(params['language'], supported_languages, fallback=None)
        if lang_code:
            language_name = supported_languages[lang_code]['alias']
            args['language'] = language_name
            args['lui'] = language_name

    params['url'] = search_url + urlencode(args)
    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath(dom, results_xpath):
        links = eval_xpath(result, link_xpath)
        if not links:
            continue
        link = links[0]
        url = link.attrib.get('href')

        # block google-ad url's
        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search url's
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if eval_xpath(result, content_xpath):
            content = extract_text(eval_xpath(result, content_xpath))
        else:
            content = ''

        published_date = None

        # check if search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]
            # fix content string
            content = content[date_pos:]

            try:
                published_date = parser.parse(date_string, dayfirst=True)
            except ValueError:
                pass

        # check if search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]

            # calculate datetime
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

            # fix content string
            content = content[date_pos:]

        if published_date:
            # append result
            results.append({'url': url, 'title': title, 'content': content, 'publishedDate': published_date})
        else:
            # append result
            results.append({'url': url, 'title': title, 'content': content})

    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):

    # Startpage's language selector is a mess: each option has a displayed name
    # and a value, either of which may represent the language name in the
    # native script, the language name in English, an English transliteration
    # of the native name, the English name of the writing script used by the
    # language, or occasionally something else entirely.

    # these cases are so special they need to be hardcoded; a couple of them
    # are misspellings
    language_names = {
        'english_uk': 'en-GB',
        'fantizhengwen': ['zh-TW', 'zh-HK'],
        'hangul': 'ko',
        'malayam': 'ml',
        'norsk': 'nb',
        'sinhalese': 'si',
        'sudanese': 'su',
    }
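    # ('malayam' and 'sudanese' look like misspellings of Malayalam ('ml') and
    # Sundanese ('su'), hence the hard-coded mapping above)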

    # get the English name of every language known by babel
    language_names.update(
        {
            # fmt: off
            name.lower(): lang_code
            # pylint: disable=protected-access
            for lang_code, name in Locale('en')._data['languages'].items()
            # fmt: on
        }
    )

    # get the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, locale_identifiers()):
        native_name = Locale(lang_code).get_language_name().lower()
        # add native name exactly as it is
        language_names[native_name] = lang_code

        # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
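        # (NFKD decomposes e.g. 'ç' U+00E7 into 'c' + combining cedilla U+0327,
        # and the combining marks are then filtered out)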
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if result is ascii (otherwise "normalization" didn't work)
            language_names[unaccented_name] = lang_code

    dom = html.fromstring(resp.text)
    sp_lang_names = []
    for option in dom.xpath('//form[@name="settings"]//select[@name="language"]/option'):
        sp_lang_names.append((option.get('value'), extract_text(option).lower()))

    supported_languages = {}
    for sp_option_value, sp_option_text in sp_lang_names:
        lang_code = language_names.get(sp_option_value) or language_names.get(sp_option_text)
        if isinstance(lang_code, str):
            supported_languages[lang_code] = {'alias': sp_option_value}
        elif isinstance(lang_code, list):
            for _lc in lang_code:
                supported_languages[_lc] = {'alias': sp_option_value}
        else:
            print('Unknown language option in Startpage: {} ({})'.format(sp_option_value, sp_option_text))

    return supported_languages


def fetch_traits(engine_traits: EngineTraits):
    """Fetch :ref:`languages <startpage languages>` and :ref:`regions <startpage
    regions>` from Startpage."""
    # pylint: disable=import-outside-toplevel, too-many-locals, too-many-branches
    # pylint: disable=too-many-statements

    engine_traits.data_type = 'supported_languages'  # deprecated

    import babel
    from searx.utils import gen_useragent
    from searx.locales import region_tag

    headers = {
        'User-Agent': gen_useragent(),
        'Accept-Language': "en-US,en;q=0.5",  # Startpage needs an English UI to get uniform names
    }
    resp = network.get('https://www.startpage.com/do/settings', headers=headers)

    if not resp.ok:
        print("ERROR: response from Startpage is not OK.")

    dom = html.fromstring(resp.text)

    # regions

    sp_region_names = []
    for option in dom.xpath('//form[@name="settings"]//select[@name="search_results_region"]/option'):
        sp_region_names.append(option.get('value'))

    for eng_tag in sp_region_names:
        if eng_tag == 'all':
            continue
        babel_region_tag = {'no_NO': 'nb_NO'}.get(eng_tag, eng_tag)  # norway
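
        # tags like 'zh-TW_HK' put a language tag before the '-' and the region
        # after the '_'; keep the primary language and the last region part,
        # e.g. 'zh-TW_HK' --> babel locale 'zh_HK'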
        if '-' in babel_region_tag:
            l, r = babel_region_tag.split('-')
            r = r.split('_')[-1]
            sxng_tag = region_tag(babel.Locale.parse(l + '_' + r, sep='_'))

        else:
            try:
                sxng_tag = region_tag(babel.Locale.parse(babel_region_tag, sep='_'))

            except babel.UnknownLocaleError:
                print("ERROR: can't determine babel locale of startpage's locale %s" % eng_tag)
                continue

        conflict = engine_traits.regions.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.regions[sxng_tag] = eng_tag

    # languages

    catalog_engine2code = {name.lower(): lang_code for lang_code, name in babel.Locale('en').languages.items()}

    # get the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, babel.localedata.locale_identifiers()):
        native_name = babel.Locale(lang_code).get_language_name().lower()
        # add native name exactly as it is
        catalog_engine2code[native_name] = lang_code

        # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if result is ascii (otherwise "normalization" didn't work)
            catalog_engine2code[unaccented_name] = lang_code

    # values that can't be determined by babel's languages names
    catalog_engine2code.update(
        {
            # traditional chinese used in ..
            'fantizhengwen': 'zh_Hant',
            # Korean alphabet
            'hangul': 'ko',
            # Malayalam is one of 22 scheduled languages of India.
            'malayam': 'ml',
            'norsk': 'nb',
            'sinhalese': 'si',
        }
    )

    skip_eng_tags = {
        'english_uk',  # SearXNG lang 'en' already maps to 'english'
    }

    for option in dom.xpath('//form[@name="settings"]//select[@name="language"]/option'):

        eng_tag = option.get('value')
        if eng_tag in skip_eng_tags:
            continue
        name = extract_text(option).lower()

        sxng_tag = catalog_engine2code.get(eng_tag)
        if sxng_tag is None:
            sxng_tag = catalog_engine2code[name]

        conflict = engine_traits.languages.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.languages[sxng_tag] = eng_tag