# SPDX-License-Identifier: AGPL-3.0-or-later
"""Startpage's language & region selectors are a mess ..

.. _startpage regions:

Startpage regions
=================

In the list of regions there are tags we need to map to common region tags::

    pt-BR_BR --> pt_BR
    zh-CN_CN --> zh_Hans_CN
    zh-TW_TW --> zh_Hant_TW
    zh-TW_HK --> zh_Hant_HK
    en-GB_GB --> en_GB

and there is at least one tag with a three letter language tag (ISO 639-2)::

    fil_PH --> fil_PH
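
A quick sanity check of such a mapping with babel (a sketch; the exact
``repr`` depends on the installed babel version)::

    >>> import babel
    >>> babel.Locale.parse('zh_TW')
    Locale('zh', territory='TW', script='Hant')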

The locale code ``no_NO`` from Startpage does not exist and is mapped to
``nb-NO``::

    babel.core.UnknownLocaleError: unknown locale 'no_NO'

For reference see languages-subtag at iana; ``no`` is the macrolanguage [1]_ and
W3C recommends subtag over macrolanguage [2]_.

.. [1] `iana: language-subtag-registry
   <https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry>`_ ::

       Type: language
       Subtag: nb
       Description: Norwegian Bokmål
       Added: 2005-10-16
       Suppress-Script: Latn
       Macrolanguage: no

.. [2]
   Use macrolanguages with care.  Some language subtags have a Scope field set
   to macrolanguage, i.e. this primary language subtag encompasses a number of
   more specific primary language subtags in the registry.  ...  As we
   recommended for the collection subtags mentioned above, in most cases you
   should try to use the more specific subtags ... `W3: The primary language
   subtag
   <https://www.w3.org/International/questions/qa-choosing-language-tags#langsubtag>`_

.. _startpage languages:

Startpage languages
===================

:py:obj:`send_accept_language_header`:
  The displayed name in Startpage's settings page depends on the location of
  the IP when the ``Accept-Language`` HTTP header is unset.  In
  :py:obj:`fetch_traits` we use::

      'Accept-Language': "en-US,en;q=0.5",

  to get uniform names independent of the IP.

.. _startpage categories:

Startpage categories
====================

Startpage's category (for Web-search, News, Videos, ..) is set by
:py:obj:`startpage_categ` in settings.yml::

  - name: startpage
    engine: startpage
    startpage_categ: web
    ...

.. hint::

   Supported categories are ``web``, ``news`` and ``images``.

"""
# pylint: disable=too-many-statements

from __future__ import annotations

from typing import TYPE_CHECKING, Any
from collections import OrderedDict
import re
from unicodedata import normalize, combining
from datetime import datetime, timedelta
from json import loads

import dateutil.parser
import lxml.html
import babel.localedata

from searx.utils import (
    extr,
    extract_text,
    eval_xpath,
    gen_useragent,
    html_to_text,
    humanize_bytes,
    remove_pua_from_str,
)
from searx.network import get  # see https://github.com/searxng/searxng/issues/762
from searx.exceptions import SearxEngineCaptchaException
from searx.locales import region_tag
from searx.enginelib.traits import EngineTraits
from searx.enginelib import EngineCache

if TYPE_CHECKING:
    import logging

    logger: logging.Logger

traits: EngineTraits

# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

startpage_categ = 'web'
"""Startpage's category, visit :ref:`startpage categories`."""

send_accept_language_header = True
"""Startpage tries to guess the user's language and territory from the HTTP
``Accept-Language`` header.  Optionally the user can select a search-language
(which can differ from the UI language) and a region filter.
"""

# engine dependent config
categories = ['general', 'web']
paging = True
max_page = 18
"""Tested 18 pages maximum (argument ``page``); to be safe the maximum is set
to 18."""

time_range_support = True
safesearch = True

time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
safesearch_dict = {0: '0', 1: '1', 2: '1'}

# search-url
base_url = 'https://www.startpage.com'
search_url = base_url + '/sp/search'

# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
search_form_xpath = '//form[@id="search"]'
"""XPath of Startpage's origin search form

.. code:: html

    <form action="/sp/search" method="post">
      <input type="text" name="query" value="" ..>
      <input type="hidden" name="t" value="device">
      <input type="hidden" name="lui" value="english">
      <input type="hidden" name="sc" value="Q7Mt5TRqowKB00">
      <input type="hidden" name="cat" value="web">
      <input type="hidden" class="abp" id="abp-input" name="abp" value="1">
    </form>
"""
CACHE: EngineCache
"""Persistent (SQLite) key/value cache that deletes its values after ``expire``
seconds."""


def init(_):
    global CACHE  # pylint: disable=global-statement
    # hint: all three startpage engines (WEB, Images & News) can/should use the
    # same sc_code ..
    CACHE = EngineCache("startpage")  # type:ignore


sc_code_cache_sec = 3600
"""Time in seconds the sc-code is cached in memory :py:obj:`get_sc_code`."""


def get_sc_code(searxng_locale, params):
    """Get a current ``sc`` argument from Startpage's search form (HTML page).

    Startpage puts a ``sc`` argument on every HTML :py:obj:`search form
    <search_form_xpath>`.  Without this argument Startpage considers the
    request to come from a bot.  We do not know what is encoded in the value
    of the ``sc`` argument, but it seems to be a kind of *time-stamp*.

    Startpage's search form generates a new sc-code on each request.  This
    function scrapes a new sc-code from Startpage's home page every
    :py:obj:`sc_code_cache_sec` seconds."""
    sc_code = CACHE.get("SC_CODE", "")
    if sc_code:
        return sc_code

    headers = {**params['headers']}
    headers['Origin'] = base_url
    headers['Referer'] = base_url + '/'
    # headers['Connection'] = 'keep-alive'
    # headers['Accept-Encoding'] = 'gzip, deflate, br'
    # headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8'
    # headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:105.0) Gecko/20100101 Firefox/105.0'

    # add Accept-Language header
    if searxng_locale == 'all':
        searxng_locale = 'en-US'
    locale = babel.Locale.parse(searxng_locale, sep='-')

    if send_accept_language_header:
        ac_lang = locale.language
        if locale.territory:
            ac_lang = "%s-%s,%s;q=0.9,*;q=0.5" % (
                locale.language,
                locale.territory,
                locale.language,
            )
        headers['Accept-Language'] = ac_lang

    get_sc_url = base_url + '/?sc=%s' % (sc_code)
    logger.debug("query new sc time-stamp ... %s", get_sc_url)
    logger.debug("headers: %s", headers)
    resp = get(get_sc_url, headers=headers)

    # ?? x = network.get('https://www.startpage.com/sp/cdn/images/filter-chevron.svg', headers=headers)
    # ?? https://www.startpage.com/sp/cdn/images/filter-chevron.svg
    # ?? ping-back URL: https://www.startpage.com/sp/pb?sc=TLsB0oITjZ8F21

    if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):  # type: ignore
        raise SearxEngineCaptchaException(
            message="get_sc_code: got redirected to https://www.startpage.com/sp/captcha",
        )

    dom = lxml.html.fromstring(resp.text)  # type: ignore

    try:
        sc_code = eval_xpath(dom, search_form_xpath + '//input[@name="sc"]/@value')[0]
    except IndexError as exc:
        logger.debug("suspend startpage API --> https://github.com/searxng/searxng/pull/695")
        raise SearxEngineCaptchaException(
            message="get_sc_code: [PR-695] query new sc time-stamp failed! (%s)" % resp.url,  # type: ignore
        ) from exc

    sc_code = str(sc_code)
    logger.debug("get_sc_code: new value is: %s", sc_code)
    CACHE.set(key="SC_CODE", value=sc_code, expire=sc_code_cache_sec)
    return sc_code
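

# A minimal usage sketch of get_sc_code() above (hypothetical values; the
# sample sc-code is the one shown in the search_form_xpath docstring):
#
#   params = {'headers': {}}
#   sc = get_sc_code('en-US', params)  # e.g. 'Q7Mt5TRqowKB00'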


def request(query, params):
    """Assemble a Startpage request.

    To avoid CAPTCHAs we need to send a well-formed HTTP POST request with a
    cookie, identical to the request built by Startpage's search form:

    - in the cookie the **region** is selected
    - in the HTTP POST data the **language** is selected

    Additionally, the arguments from Startpage's search form need to be set in
    the HTTP POST data; compare the ``<input>`` elements in
    :py:obj:`search_form_xpath`.
    """
    engine_region = traits.get_region(params['searxng_locale'], 'en-US')
    engine_language = traits.get_language(params['searxng_locale'], 'en')

    # build arguments
    args = {
        'query': query,
        'cat': startpage_categ,
        't': 'device',
        'sc': get_sc_code(params['searxng_locale'], params),  # hint: this func needs HTTP headers
        'with_date': time_range_dict.get(params['time_range'], ''),
    }

    if engine_language:
        args['language'] = engine_language
        args['lui'] = engine_language

    args['abp'] = '1'

    if params['pageno'] > 1:
        args['page'] = params['pageno']

    # build cookie
    lang_homepage = 'en'
    cookie = OrderedDict()
    cookie['date_time'] = 'world'
    cookie['disable_family_filter'] = safesearch_dict[params['safesearch']]
    cookie['disable_open_in_new_window'] = '0'
    cookie['enable_post_method'] = '1'  # hint: POST
    cookie['enable_proxy_safety_suggest'] = '1'
    cookie['enable_stay_control'] = '1'
    cookie['instant_answers'] = '1'
    cookie['lang_homepage'] = 's/device/%s/' % lang_homepage
    cookie['num_of_results'] = '10'
    cookie['suggestions'] = '1'
    cookie['wt_unit'] = 'celsius'

    if engine_language:
        cookie['language'] = engine_language
        cookie['language_ui'] = engine_language

    if engine_region:
        cookie['search_results_region'] = engine_region
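
    # The 'preferences' cookie is a flat key/value list: 'EEE' separates a key
    # from its value and 'N1N' separates the pairs.  A sketch of the join
    # below, with two sample items only:
    #
    #   >>> 'N1N'.join("%sEEE%s" % x for x in {'suggestions': '1', 'wt_unit': 'celsius'}.items())
    #   'suggestionsEEE1N1Nwt_unitEEEcelsius'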
    params['cookies']['preferences'] = 'N1N'.join(["%sEEE%s" % x for x in cookie.items()])
    logger.debug('cookie preferences: %s', params['cookies']['preferences'])

    # POST request
    logger.debug("data: %s", args)
    params['data'] = args
    params['method'] = 'POST'
    params['url'] = search_url
    params['headers']['Origin'] = base_url
    params['headers']['Referer'] = base_url + '/'
    # is the Accept header needed?
    # params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'

    return params


def _parse_published_date(content: str) -> tuple[str, datetime | None]:
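    """Strip a leading date prefix such as ``"2 Sep 2014 ... "`` or ``"5 days
    ago ... "`` from *content*; return the remaining content and the parsed
    date (or ``None`` when no prefix matches).

    A sketch of the absolute-date form (the relative form depends on "now")::

        >>> _parse_published_date("2 Sep 2014 ... Lorem ipsum.")
        ('Lorem ipsum.', datetime.datetime(2014, 9, 2, 0, 0))
    """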
    published_date = None

    # check if search result starts with something like: "2 Sep 2014 ... "
    if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
        date_pos = content.find('...') + 4
        date_string = content[0 : date_pos - 5]
        # fix content string
        content = content[date_pos:]

        try:
            published_date = dateutil.parser.parse(date_string, dayfirst=True)
        except ValueError:
            pass

    # check if search result starts with something like: "5 days ago ... "
    elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
        date_pos = content.find('...') + 4
        date_string = content[0 : date_pos - 5]

        # calculate datetime
        published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))  # type: ignore

        # fix content string
        content = content[date_pos:]

    return content, published_date


def _get_web_result(result):
    content = html_to_text(result.get('description'))
    content, publishedDate = _parse_published_date(content)

    return {
        'url': result['clickUrl'],
        'title': html_to_text(result['title']),
        'content': content,
        'publishedDate': publishedDate,
    }


def _get_news_result(result):
    title = remove_pua_from_str(html_to_text(result['title']))
    content = remove_pua_from_str(html_to_text(result.get('description')))

    publishedDate = None
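    # 'date' is a Unix epoch in milliseconds -- hence the division by 1000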
    if result.get('date'):
        publishedDate = datetime.fromtimestamp(result['date'] / 1000)

    thumbnailUrl = None
    if result.get('thumbnailUrl'):
        thumbnailUrl = base_url + result['thumbnailUrl']

    return {
        'url': result['clickUrl'],
        'title': title,
        'content': content,
        'publishedDate': publishedDate,
        'thumbnail': thumbnailUrl,
    }


def _get_image_result(result) -> dict[str, Any] | None:
    url = result.get('altClickUrl')
    if not url:
        return None

    thumbnailUrl = None
    if result.get('thumbnailUrl'):
        thumbnailUrl = base_url + result['thumbnailUrl']

    resolution = None
    if result.get('width') and result.get('height'):
        resolution = f"{result['width']}x{result['height']}"

    filesize = None
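    # the reported file size may contain non-digit characters; keep only the
    # digits before converting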
    if result.get('filesize'):
        size_str = ''.join(filter(str.isdigit, result['filesize']))
        filesize = humanize_bytes(int(size_str))

    return {
        'template': 'images.html',
        'url': url,
        'title': html_to_text(result['title']),
        'content': '',
        'img_src': result.get('rawImageUrl'),
        'thumbnail_src': thumbnailUrl,
        'resolution': resolution,
        'img_format': result.get('format'),
        'filesize': filesize,
    }
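

# The SERP embeds its result data in a React call; response() below cuts the
# JSON object out of that call with extr().  A self-contained sketch with a
# made-up payload:
#
#   >>> html = 'React.createElement(UIStartpage.AppSerpWeb, {"render": {"presenter": {}}})'
#   >>> '{' + extr(html, 'React.createElement(UIStartpage.AppSerpWeb, {', '}})') + '}}'
#   '{"render": {"presenter": {}}}'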
def response(resp):
    categ = startpage_categ.capitalize()
    results_raw = '{' + extr(resp.text, f"React.createElement(UIStartpage.AppSerp{categ}, {{", '}})') + '}}'
    results_json = loads(results_raw)
    results_obj = results_json.get('render', {}).get('presenter', {}).get('regions', {})

    results = []
    for results_categ in results_obj.get('mainline', []):
        for item in results_categ.get('results', []):
            if results_categ['display_type'] == 'web-google':
                results.append(_get_web_result(item))
            elif results_categ['display_type'] == 'news-bing':
                results.append(_get_news_result(item))
            elif 'images' in results_categ['display_type']:
                item = _get_image_result(item)
                if item:
                    results.append(item)

    return results


def fetch_traits(engine_traits: EngineTraits):
    """Fetch :ref:`languages <startpage languages>` and :ref:`regions <startpage
    regions>` from Startpage."""
    # pylint: disable=too-many-branches

    headers = {
        'User-Agent': gen_useragent(),
        # get uniform names independent of the IP, see "Startpage languages"
        # in the module docstring
        'Accept-Language': "en-US,en;q=0.5",
    }
    resp = get('https://www.startpage.com/do/settings', headers=headers)

    if not resp.ok:  # type: ignore
        print("ERROR: response from Startpage is not OK.")

    dom = lxml.html.fromstring(resp.text)  # type: ignore

    # regions

    sp_region_names = []
    for option in dom.xpath('//form[@name="settings"]//select[@name="search_results_region"]/option'):
        sp_region_names.append(option.get('value'))

    for eng_tag in sp_region_names:
        if eng_tag == 'all':
            continue
        babel_region_tag = {'no_NO': 'nb_NO'}.get(eng_tag, eng_tag)  # norway
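
        # a tag like 'zh-TW_HK' combines the primary language with the last
        # '_' part into the babel locale 'zh_HK'; e.g. 'en-GB_GB' --> 'en_GB'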
        if '-' in babel_region_tag:
            l, r = babel_region_tag.split('-')
            r = r.split('_')[-1]
            sxng_tag = region_tag(babel.Locale.parse(l + '_' + r, sep='_'))

        else:
            try:
                sxng_tag = region_tag(babel.Locale.parse(babel_region_tag, sep='_'))
            except babel.UnknownLocaleError:
                print("ERROR: can't determine babel locale of startpage's locale %s" % eng_tag)
                continue

        conflict = engine_traits.regions.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.regions[sxng_tag] = eng_tag

    # languages

    catalog_engine2code = {name.lower(): lang_code for lang_code, name in babel.Locale('en').languages.items()}

    # get the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, babel.localedata.locale_identifiers()):
        native_name = babel.Locale(lang_code).get_language_name()
        if not native_name:
            print(f"ERROR: language name of startpage's language {lang_code} is unknown by babel")
            continue
        native_name = native_name.lower()
        # add the native name exactly as it is
        catalog_engine2code[native_name] = lang_code

        # add the "normalized" language name (i.e. français becomes francais
        # and español becomes espanol)
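        # a quick sketch of the two steps below:
        #   normalize('NFKD', 'français')  --> 'franc' + combining cedilla + 'ais'
        #   drop combining marks           --> ASCII 'francais'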
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if the result is plain ASCII (otherwise the "normalization" didn't work)
            catalog_engine2code[unaccented_name] = lang_code

    # values that can't be determined by babel's language names
    catalog_engine2code.update(
        {
            # traditional chinese used in ..
            'fantizhengwen': 'zh_Hant',
            # Korean alphabet
            'hangul': 'ko',
            # Malayalam is one of the 22 scheduled languages of India.
            'malayam': 'ml',
            'norsk': 'nb',
            'sinhalese': 'si',
        }
    )

    skip_eng_tags = {
        'english_uk',  # SearXNG lang 'en' already maps to 'english'
    }

    for option in dom.xpath('//form[@name="settings"]//select[@name="language"]/option'):
        eng_tag = option.get('value')
        if eng_tag in skip_eng_tags:
            continue
        name = extract_text(option).lower()  # type: ignore

        sxng_tag = catalog_engine2code.get(eng_tag)
        if sxng_tag is None:
            sxng_tag = catalog_engine2code[name]

        conflict = engine_traits.languages.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.languages[sxng_tag] = eng_tag