startpage.py 7.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. # lint: pylint
  3. """Startpage (Web)
  4. """
  5. import re
  6. from time import time
  7. from urllib.parse import urlencode
  8. from unicodedata import normalize, combining
  9. from datetime import datetime, timedelta
  10. from dateutil import parser
  11. from lxml import html
  12. from babel import Locale
  13. from babel.localedata import locale_identifiers
  14. from searx import network
  15. from searx.utils import extract_text, eval_xpath, match_language
# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
# there is a mechanism to block "bot" search
# (probably the parameter qid), require
# storing of qid's between multiple search-calls
paging = True
supported_languages_url = 'https://www.startpage.com/do/settings'

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'sp/search?'

# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="w-gl__result__main"]'
link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'

# timestamp (epoch seconds) of the last fetch of the 'sc' code
sc_code_ts = 0
# cached 'sc' argument, scraped from startpage's home page (see get_sc_code)
sc_code = ''
  44. def get_sc_code(headers):
  45. """Get an actual `sc` argument from startpage's home page.
  46. Startpage puts a `sc` argument on every link. Without this argument
  47. startpage considers the request is from a bot. We do not know what is
  48. encoded in the value of the `sc` argument, but it seems to be a kind of a
  49. *time-stamp*. This *time-stamp* is valid for a few hours.
  50. This function scrap a new *time-stamp* from startpage's home page every hour
  51. (3000 sec).
  52. """
  53. global sc_code_ts, sc_code # pylint: disable=global-statement
  54. if time() > (sc_code_ts + 3000):
  55. logger.debug("query new sc time-stamp ...")
  56. resp = network.get(base_url, headers=headers)
  57. dom = html.fromstring(resp.text)
  58. # href --> '/?sc=adrKJMgF8xwp20'
  59. href = eval_xpath(dom, '//a[@class="footer-home__logo"]')[0].get('href')
  60. sc_code = href[5:]
  61. sc_code_ts = time()
  62. logger.debug("new value is: %s", sc_code)
  63. return sc_code
  64. # do search-request
  65. def request(query, params):
  66. args = {
  67. 'query': query,
  68. 'page': params['pageno'],
  69. 'cat': 'web',
  70. # 'abp': "-1",
  71. 'sc': get_sc_code(params['headers']),
  72. }
  73. # set language if specified
  74. if params['language'] != 'all':
  75. lang_code = match_language(params['language'], supported_languages, fallback=None)
  76. if lang_code:
  77. language_name = supported_languages[lang_code]['alias']
  78. args['language'] = language_name
  79. args['lui'] = language_name
  80. params['url'] = search_url + urlencode(args)
  81. return params
  82. # get response from search-request
  83. def response(resp):
  84. results = []
  85. dom = html.fromstring(resp.text)
  86. # parse results
  87. for result in eval_xpath(dom, results_xpath):
  88. links = eval_xpath(result, link_xpath)
  89. if not links:
  90. continue
  91. link = links[0]
  92. url = link.attrib.get('href')
  93. # block google-ad url's
  94. if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
  95. continue
  96. # block startpage search url's
  97. if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
  98. continue
  99. title = extract_text(link)
  100. if eval_xpath(result, content_xpath):
  101. content = extract_text(eval_xpath(result, content_xpath))
  102. else:
  103. content = ''
  104. published_date = None
  105. # check if search result starts with something like: "2 Sep 2014 ... "
  106. if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
  107. date_pos = content.find('...') + 4
  108. date_string = content[0 : date_pos - 5]
  109. # fix content string
  110. content = content[date_pos:]
  111. try:
  112. published_date = parser.parse(date_string, dayfirst=True)
  113. except ValueError:
  114. pass
  115. # check if search result starts with something like: "5 days ago ... "
  116. elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
  117. date_pos = content.find('...') + 4
  118. date_string = content[0 : date_pos - 5]
  119. # calculate datetime
  120. published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))
  121. # fix content string
  122. content = content[date_pos:]
  123. if published_date:
  124. # append result
  125. results.append({'url': url, 'title': title, 'content': content, 'publishedDate': published_date})
  126. else:
  127. # append result
  128. results.append({'url': url, 'title': title, 'content': content})
  129. # return results
  130. return results
  131. # get supported languages from their site
  132. def _fetch_supported_languages(resp):
  133. # startpage's language selector is a mess each option has a displayed name
  134. # and a value, either of which may represent the language name in the native
  135. # script, the language name in English, an English transliteration of the
  136. # native name, the English name of the writing script used by the language,
  137. # or occasionally something else entirely.
  138. # this cases are so special they need to be hardcoded, a couple of them are mispellings
  139. language_names = {
  140. 'english_uk': 'en-GB',
  141. 'fantizhengwen': ['zh-TW', 'zh-HK'],
  142. 'hangul': 'ko',
  143. 'malayam': 'ml',
  144. 'norsk': 'nb',
  145. 'sinhalese': 'si',
  146. 'sudanese': 'su',
  147. }
  148. # get the English name of every language known by babel
  149. language_names.update(
  150. {
  151. # fmt: off
  152. name.lower(): lang_code
  153. # pylint: disable=protected-access
  154. for lang_code, name in Locale('en')._data['languages'].items()
  155. # fmt: on
  156. }
  157. )
  158. # get the native name of every language known by babel
  159. for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, locale_identifiers()):
  160. native_name = Locale(lang_code).get_language_name().lower()
  161. # add native name exactly as it is
  162. language_names[native_name] = lang_code
  163. # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
  164. unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
  165. if len(unaccented_name) == len(unaccented_name.encode()):
  166. # add only if result is ascii (otherwise "normalization" didn't work)
  167. language_names[unaccented_name] = lang_code
  168. dom = html.fromstring(resp.text)
  169. sp_lang_names = []
  170. for option in dom.xpath('//form[@id="settings-form"]//select[@name="language"]/option'):
  171. sp_lang_names.append((option.get('value'), extract_text(option).lower()))
  172. supported_languages = {}
  173. for sp_option_value, sp_option_text in sp_lang_names:
  174. lang_code = language_names.get(sp_option_value) or language_names.get(sp_option_text)
  175. if isinstance(lang_code, str):
  176. supported_languages[lang_code] = {'alias': sp_option_value}
  177. elif isinstance(lang_code, list):
  178. for _lc in lang_code:
  179. supported_languages[_lc] = {'alias': sp_option_value}
  180. else:
  181. print('Unknown language option in Startpage: {} ({})'.format(sp_option_value, sp_option_text))
  182. return supported_languages