# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Startpage (Web)
"""

from lxml import html
from dateutil import parser
from datetime import datetime, timedelta
import re
from unicodedata import normalize, combining

from babel import Locale
from babel.localedata import locale_identifiers

from searx.utils import extract_text, eval_xpath, match_language

# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general']
# there is a mechanism to block "bot" searches
# (probably the parameter qid); it requires
# storing qids between multiple search calls
paging = True
language_support = True
supported_languages_url = 'https://www.startpage.com/do/settings'

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'do/search'

# specific xpath variables
# ads xpath: //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="w-gl__result__main"]'
link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'


# do search-request
def request(query, params):
    params['url'] = search_url
    params['method'] = 'POST'
    params['data'] = {
        'query': query,
        'page': params['pageno'],
        'cat': 'web',
        'cmd': 'process_search',
        'engine0': 'v1all',
    }

    # set language if specified
    if params['language'] != 'all':
        lang_code = match_language(params['language'], supported_languages, fallback=None)
        if lang_code:
            language_name = supported_languages[lang_code]['alias']
            params['data']['language'] = language_name
            params['data']['lui'] = language_name

    return params
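
# Illustrative example (the 'francais' alias is an assumption about the live
# supported_languages mapping): request('paris', {'pageno': 2, 'language': 'fr'})
# would set params['data'] to roughly
#   {'query': 'paris', 'page': 2, 'cat': 'web', 'cmd': 'process_search',
#    'engine0': 'v1all', 'language': 'francais', 'lui': 'francais'}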


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath(dom, results_xpath):
        links = eval_xpath(result, link_xpath)
        if not links:
            continue
        link = links[0]
        url = link.attrib.get('href')

        # block google-ad URLs
        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search URLs
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if eval_xpath(result, content_xpath):
            content = extract_text(eval_xpath(result, content_xpath))
        else:
            content = ''

        published_date = None

        # check if the search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0:date_pos - 5]
            # strip the date prefix from the content string
            content = content[date_pos:]

            try:
                published_date = parser.parse(date_string, dayfirst=True)
            except ValueError:
                pass

        # check if the search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0:date_pos - 5]

            # calculate datetime from the "N days ago" prefix
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

            # strip the date prefix from the content string
            content = content[date_pos:]

        if published_date:
            # append result with publication date
            results.append({'url': url,
                            'title': title,
                            'content': content,
                            'publishedDate': published_date})
        else:
            # append result
            results.append({'url': url,
                            'title': title,
                            'content': content})

    # return results
    return results
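
# Worked example of the date-prefix handling above: for
# content = "2 Sep 2014 ... Lorem ipsum", content.find('...') returns 11, so
# date_pos is 15; date_string becomes content[0:10] == "2 Sep 2014" and the
# remaining content is "Lorem ipsum". For "5 days ago ... Lorem ipsum", the
# leading integer 5 feeds timedelta(days=5).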


# get supported languages from their site
def _fetch_supported_languages(resp):
    # Startpage's language selector is a mess: each option has a displayed
    # name and a value, either of which may represent the language name in
    # the native script, the language name in English, an English
    # transliteration of the native name, the English name of the writing
    # script used by the language, or occasionally something else entirely.
    # These cases are so special they need to be hardcoded; a couple of them
    # are misspellings.
    language_names = {
        'english_uk': 'en-GB',
        'fantizhengwen': ['zh-TW', 'zh-HK'],
        'hangul': 'ko',
        'malayam': 'ml',
        'norsk': 'nb',
        'sinhalese': 'si',
        'sudanese': 'su'
    }

    # get the English name of every language known by babel
    language_names.update({name.lower(): lang_code for lang_code, name in Locale('en')._data['languages'].items()})

    # get the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, locale_identifiers()):
        native_name = Locale(lang_code).get_language_name().lower()
        # add native name exactly as it is
        language_names[native_name] = lang_code

        # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if the result is ASCII (otherwise "normalization" didn't work)
            language_names[unaccented_name] = lang_code
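    # For example, NFKD decomposes the 'ç' in 'français' into 'c' plus a
    # combining cedilla (U+0327); dropping combining marks leaves 'francais',
    # which is pure ASCII, so the length check in the loop above accepts it.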

    dom = html.fromstring(resp.text)
    sp_lang_names = []
    for option in dom.xpath('//form[@id="settings-form"]//select[@name="language"]/option'):
        sp_lang_names.append((option.get('value'), extract_text(option).lower()))

    supported_languages = {}
    for sp_option_value, sp_option_text in sp_lang_names:
        lang_code = language_names.get(sp_option_value) or language_names.get(sp_option_text)
        if isinstance(lang_code, str):
            supported_languages[lang_code] = {'alias': sp_option_value}
        elif isinstance(lang_code, list):
            for lc in lang_code:
                supported_languages[lc] = {'alias': sp_option_value}
        else:
            print('Unknown language option in Startpage: {} ({})'.format(sp_option_value, sp_option_text))

    return supported_languages
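

# Minimal standalone sketch (an assumption, not part of the searx engine
# interface): searx normally drives request()/response() itself, but the
# request half can be exercised directly, since language 'all' skips the
# supported_languages lookup that searx injects at runtime.
if __name__ == '__main__':
    demo_params = {'pageno': 1, 'language': 'all'}
    print(request('test query', demo_params))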