# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Startpage (Web)
"""

import re
from time import time
from urllib.parse import urlencode
from unicodedata import normalize, combining
from datetime import datetime, timedelta

from dateutil import parser
from lxml import html
from babel import Locale
from babel.localedata import locale_identifiers

from searx import network
from searx.utils import extract_text, eval_xpath, match_language
from searx.exceptions import (
    SearxEngineResponseException,
    SearxEngineCaptchaException,
)

# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
# there is a mechanism to block "bot" searches (probably the parameter qid);
# it requires storing qid's between multiple search calls
paging = True
supported_languages_url = 'https://www.startpage.com/do/settings'

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'sp/search?'

# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="w-gl__result__main"]'
link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'

# timestamp of the last fetch of 'sc' code
sc_code_ts = 0
sc_code = ''


def raise_captcha(resp):
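    """Raise a CAPTCHA exception when Startpage has (apparently) redirected
    the request to its CAPTCHA page instead of serving results."""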
    if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):
        # suspend CAPTCHA for 7 days
        raise SearxEngineCaptchaException(suspended_time=7 * 24 * 3600)


def get_sc_code(headers):
    """Get an actual `sc` argument from startpage's home page.

    Startpage puts a `sc` argument on every link.  Without this argument
    startpage considers the request to come from a bot.  We do not know what
    is encoded in the value of the `sc` argument, but it seems to be a kind
    of *time-stamp*.  This *time-stamp* is valid for a few hours.

    This function scrapes a new *time-stamp* from startpage's home page every
    3000 seconds (50 minutes).
    """
    global sc_code_ts, sc_code  # pylint: disable=global-statement

    if time() > (sc_code_ts + 3000):
        # logger is injected into engine modules by searx.engines at load time
        logger.debug("query new sc time-stamp ...")
        resp = network.get(base_url, headers=headers)
        raise_captcha(resp)
        dom = html.fromstring(resp.text)

        try:
            # href --> '/?sc=adrKJMgF8xwp20'
            href = eval_xpath(dom, '//a[@class="footer-home__logo"]')[0].get('href')
        except IndexError as exc:
            # suspend startpage API --> https://github.com/searxng/searxng/pull/695
            raise SearxEngineResponseException(
                suspended_time=7 * 24 * 3600, message="PR-695: query new sc time-stamp failed!"
            ) from exc

        sc_code = href[5:]  # strip the leading '/?sc='
        sc_code_ts = time()
        logger.debug("new value is: %s", sc_code)
    return sc_code


# do search-request
def request(query, params):

    args = {
        'query': query,
        'page': params['pageno'],
        'cat': 'web',
        # 'abp': "-1",
        'sc': get_sc_code(params['headers']),
    }

    # set language if specified
    if params['language'] != 'all':
        lang_code = match_language(params['language'], supported_languages, fallback=None)
        if lang_code:
            language_name = supported_languages[lang_code]['alias']
            args['language'] = language_name
            args['lui'] = language_name

    params['url'] = search_url + urlencode(args)
    return params
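
# Illustrative example of an assembled request URL (the `sc` value is made up):
#
#   https://startpage.com/sp/search?query=searx&page=1&cat=web&sc=adrKJMgF8xwp20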


# get response from search-request
def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath(dom, results_xpath):
        links = eval_xpath(result, link_xpath)
        if not links:
            continue
        link = links[0]
        url = link.attrib.get('href')

        # block google-ad url's
        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search url's
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if eval_xpath(result, content_xpath):
            content = extract_text(eval_xpath(result, content_xpath))
        else:
            content = ''

        published_date = None

        # check if search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]
            # fix content string
            content = content[date_pos:]

            try:
                published_date = parser.parse(date_string, dayfirst=True)
            except ValueError:
                pass

        # check if search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]

            # calculate datetime
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

            # fix content string
            content = content[date_pos:]
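
        # Worked example for the slicing above (illustrative content string):
        #   content == "2 Sep 2014 ... Lorem ipsum"
        #   content.find('...') == 11, so date_pos == 15
        #   date_string == content[0:10] == "2 Sep 2014"
        #   content     == content[15:] == "Lorem ipsum"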
        if published_date:
            # append result
            results.append({'url': url, 'title': title, 'content': content, 'publishedDate': published_date})
        else:
            # append result
            results.append({'url': url, 'title': title, 'content': content})

    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
    # startpage's language selector is a mess: each option has a displayed name
    # and a value, either of which may represent the language name in the native
    # script, the language name in English, an English transliteration of the
    # native name, the English name of the writing script used by the language,
    # or occasionally something else entirely.

    # these cases are so special they need to be hardcoded; a couple of them are
    # misspellings
    language_names = {
        'english_uk': 'en-GB',
        'fantizhengwen': ['zh-TW', 'zh-HK'],
        'hangul': 'ko',
        'malayam': 'ml',
        'norsk': 'nb',
        'sinhalese': 'si',
        'sudanese': 'su',
    }

    # get the English name of every language known by babel
    language_names.update(
        {
            # fmt: off
            name.lower(): lang_code
            # pylint: disable=protected-access
            for lang_code, name in Locale('en')._data['languages'].items()
            # fmt: on
        }
    )
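    # e.g. babel's English locale data maps 'fr' to 'French', so the
    # comprehension above adds {'french': 'fr'} to language_names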

    # get the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, locale_identifiers()):
        native_name = Locale(lang_code).get_language_name().lower()
        # add native name exactly as it is
        language_names[native_name] = lang_code

        # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if result is ascii (otherwise "normalization" didn't work)
            language_names[unaccented_name] = lang_code
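
        # How the normalization works: NFKD decomposes 'ç' into 'c' plus
        # U+0327 (combining cedilla); the combining() filter then drops the
        # cedilla, leaving plain 'c'. The len() comparison holds only for
        # pure-ASCII results, since any non-ASCII character needs more than
        # one UTF-8 byte.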

    dom = html.fromstring(resp.text)
    sp_lang_names = []
    for option in dom.xpath('//form[@id="settings-form"]//select[@name="language"]/option'):
        sp_lang_names.append((option.get('value'), extract_text(option).lower()))

    supported_languages = {}
    for sp_option_value, sp_option_text in sp_lang_names:
        lang_code = language_names.get(sp_option_value) or language_names.get(sp_option_text)
        if isinstance(lang_code, str):
            supported_languages[lang_code] = {'alias': sp_option_value}
        elif isinstance(lang_code, list):
            for _lc in lang_code:
                supported_languages[_lc] = {'alias': sp_option_value}
        else:
            print('Unknown language option in Startpage: {} ({})'.format(sp_option_value, sp_option_text))
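
    # Illustrative shape of the returned mapping (actual entries depend on the
    # options Startpage currently serves):
    #
    #   {'en-GB': {'alias': 'english_uk'}, 'ko': {'alias': 'hangul'}, ...}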
    return supported_languages