
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Startpage (Web)
"""

import re
from urllib.parse import urlencode
from unicodedata import normalize, combining
from datetime import datetime, timedelta

from dateutil import parser
from lxml import html
from babel import Locale
from babel.localedata import locale_identifiers

from searx.utils import extract_text, eval_xpath, match_language

# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
# there is a mechanism to block "bot" searches
# (probably the parameter qid); it would require
# storing qid's between multiple search calls
paging = True
supported_languages_url = 'https://www.startpage.com/do/settings'

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'sp/search?'

# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="w-gl__result__main"]'
link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'


# do search-request
def request(query, params):
    args = {
        'query': query,
        'page': params['pageno'],
        'cat': 'web',
        # 'abp': "-1",
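        # the 'sc' value below appears to be a token Startpage hands out to
        # tell browsers and bots apart; this hardcoded sample may stop
        # working at any time (an assumption, Startpage does not document it)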
        'sc': 'Mj4jZy61QETj20',
    }

    # set language if specified
    if params['language'] != 'all':
        lang_code = match_language(params['language'], supported_languages, fallback=None)
        if lang_code:
            language_name = supported_languages[lang_code]['alias']
            args['language'] = language_name
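            # 'lui' presumably selects the interface language alongside the
            # result language (an assumption, not documented by Startpage)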
            args['lui'] = language_name

    params['url'] = search_url + urlencode(args)
    return params
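
# With the defaults above, a query "paris" on page 2 would produce a URL
# roughly like the following (the language alias is whatever
# supported_languages maps 'en' to, assumed here to be 'english'):
#   https://startpage.com/sp/search?query=paris&page=2&cat=web&sc=Mj4jZy61QETj20&language=english&lui=english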


# get response from search-request
def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath(dom, results_xpath):
        links = eval_xpath(result, link_xpath)
        if not links:
            continue
        link = links[0]
        url = link.attrib.get('href')

        # block google-ad URLs
        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search URLs
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if eval_xpath(result, content_xpath):
            content = extract_text(eval_xpath(result, content_xpath))
        else:
            content = ''

        published_date = None

        # check if search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
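            # e.g. content = "2 Sep 2014 ... Next news" gives date_pos = 15,
            # date_string = "2 Sep 2014" and the trimmed content "Next news"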
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]

            # fix content string
            content = content[date_pos:]

            try:
                published_date = parser.parse(date_string, dayfirst=True)
            except ValueError:
                pass

        # check if search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
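            # e.g. content = "5 days ago ... Next news" gives the date_string
            # "5 days ago", whose leading integer is the day offset from now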
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]

            # calculate datetime
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

            # fix content string
            content = content[date_pos:]

        if published_date:
            # append result
            results.append({'url': url, 'title': title, 'content': content, 'publishedDate': published_date})
        else:
            # append result
            results.append({'url': url, 'title': title, 'content': content})

    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
    # Startpage's language selector is a mess: each option has a displayed
    # name and a value, either of which may represent the language name in
    # the native script, the language name in English, an English
    # transliteration of the native name, the English name of the writing
    # script used by the language, or occasionally something else entirely.

    # these cases are so special they need to be hardcoded; a couple of them
    # are misspellings
    language_names = {
        'english_uk': 'en-GB',
        'fantizhengwen': ['zh-TW', 'zh-HK'],
        'hangul': 'ko',
        'malayam': 'ml',
        'norsk': 'nb',
        'sinhalese': 'si',
        'sudanese': 'su',
    }

    # get the English name of every language known by babel
    language_names.update(
        {
            # fmt: off
            name.lower(): lang_code
            # pylint: disable=protected-access
            for lang_code, name in Locale('en')._data['languages'].items()
            # fmt: on
        }
    )
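    # this adds entries such as 'english': 'en' and 'german': 'de' (the exact
    # set depends on the installed babel version)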

    # get the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, locale_identifiers()):
        native_name = Locale(lang_code).get_language_name().lower()
        # add native name exactly as it is
        language_names[native_name] = lang_code

        # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
        if len(unaccented_name) == len(unaccented_name.encode()):
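            # (a pure-ASCII string encodes to exactly one UTF-8 byte per character)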
            # add only if result is ascii (otherwise "normalization" didn't work)
            language_names[unaccented_name] = lang_code

    dom = html.fromstring(resp.text)
    sp_lang_names = []
    for option in dom.xpath('//form[@id="settings-form"]//select[@name="language"]/option'):
        sp_lang_names.append((option.get('value'), extract_text(option).lower()))

    supported_languages = {}
    for sp_option_value, sp_option_text in sp_lang_names:
        lang_code = language_names.get(sp_option_value) or language_names.get(sp_option_text)
        if isinstance(lang_code, str):
            supported_languages[lang_code] = {'alias': sp_option_value}
        elif isinstance(lang_code, list):
            for _lc in lang_code:
                supported_languages[_lc] = {'alias': sp_option_value}
        else:
            print('Unknown language option in Startpage: {} ({})'.format(sp_option_value, sp_option_text))

    return supported_languages