# SPDX-License-Identifier: AGPL-3.0-or-later
"""This is the implementation of the Google WEB engine.  Some of these
implementations (mainly :py:obj:`get_google_info`) are shared by other
engines:

- :ref:`google images engine`
- :ref:`google news engine`
- :ref:`google videos engine`
- :ref:`google scholar engine`
- :ref:`google autocomplete`
"""

from __future__ import annotations

from typing import TYPE_CHECKING

import re
import random
import string
import time
from urllib.parse import urlencode

from lxml import html
import babel
import babel.core
import babel.languages

from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
from searx.locales import language_tag, region_tag, get_official_locales
from searx.network import get  # see https://github.com/searxng/searxng/issues/762
from searx.exceptions import SearxEngineCaptchaException
from searx.enginelib.traits import EngineTraits
from searx.result_types import EngineResults

if TYPE_CHECKING:
    import logging

    logger: logging.Logger

traits: EngineTraits

# about
about = {
    "website": 'https://www.google.com',
    "wikidata_id": 'Q9366',
    "official_api_documentation": 'https://developers.google.com/custom-search/',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
paging = True
max_page = 50
time_range_support = True
safesearch = True

time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}

# Filter results. 0: None, 1: Moderate, 2: Strict
filter_mapping = {0: 'off', 1: 'medium', 2: 'high'}

# specific xpath variables
# ------------------------

# Suggestions are links placed in a *card-section*; we extract only the text
# from the links, not the links themselves.
suggestion_xpath = '//div[contains(@class, "EIaa9b")]//a'

_arcid_range = string.ascii_letters + string.digits + "_-"
_arcid_random: tuple[str, int] | None = None


def ui_async(start: int) -> str:
    """Format of the ``async`` argument in the UI's request.

    - ``arc_id:<...>,use_ac:true,_fmt:prog``

    The arc_id is randomly generated every hour.
    """
    global _arcid_random  # pylint: disable=global-statement

    use_ac = "use_ac:true"
    # _fmt:html returns a HTTP 500 when the user searches for celebrities like
    # '!google natasha allegri' or '!google chris evans'
    _fmt = "_fmt:prog"

    # create a new random arc_id every hour
    if not _arcid_random or (int(time.time()) - _arcid_random[1]) > 3600:
        _arcid_random = (''.join(random.choices(_arcid_range, k=23)), int(time.time()))
    arc_id = f"arc_id:srp_{_arcid_random[0]}_1{start:02}"

    return ",".join([arc_id, use_ac, _fmt])
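
# Illustrative only: the arc id is random and rotates every hour, but with
# start=10 and the 23 character random id the returned string is shaped like
#
#   ui_async(10) --> "arc_id:srp_<23 random chars>_110,use_ac:true,_fmt:prog"
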


def get_google_info(params, eng_traits):
    """Composes various (language) properties for the google engines (:ref:`google
    API`).

    This function is called by the various google engines (:ref:`google web
    engine`, :ref:`google images engine`, :ref:`google news engine` and
    :ref:`google videos engine`).

    :param dict params: Request parameters of the engine.  At least
        a ``searxng_locale`` key should be in the dictionary.

    :param eng_traits: Engine's traits fetched from google preferences
        (:py:obj:`searx.enginelib.traits.EngineTraits`)

    :rtype: dict
    :returns:
        Py-Dictionary with the key/value pairs:

        language:
            The language code that is used by google (e.g. ``lang_en`` or
            ``lang_zh-TW``)

        country:
            The country code that is used by google (e.g. ``US`` or ``TW``)

        locale:
            An instance of :py:obj:`babel.core.Locale` built from the
            ``searxng_locale`` value.

        subdomain:
            Google subdomain :py:obj:`google_domains` that fits the country
            code.

        params:
            Py-Dictionary with additional request arguments (can be passed to
            :py:func:`urllib.parse.urlencode`).

            - ``hl`` parameter: specifies the interface language of the user interface.
            - ``lr`` parameter: restricts search results to documents written in
              a particular language.
            - ``cr`` parameter: restricts search results to documents
              originating in a particular country.
            - ``ie`` parameter: sets the character encoding scheme that should
              be used to interpret the query string ('utf8').
            - ``oe`` parameter: sets the character encoding scheme that should
              be used to decode the XML result ('utf8').

        headers:
            Py-Dictionary with additional HTTP headers (can be passed to
            the request's headers)

            - ``Accept: '*/*'``
    """

    ret_val = {
        'language': None,
        'country': None,
        'subdomain': None,
        'params': {},
        'headers': {},
        'cookies': {},
        'locale': None,
    }

    sxng_locale = params.get('searxng_locale', 'all')
    try:
        locale = babel.Locale.parse(sxng_locale, sep='-')
    except babel.core.UnknownLocaleError:
        locale = None

    eng_lang = eng_traits.get_language(sxng_locale, 'lang_en')
    lang_code = eng_lang.split('_')[-1]  # lang_zh-TW --> zh-TW / lang_en --> en
    country = eng_traits.get_region(sxng_locale, eng_traits.all_locale)

    # Test zh_hans & zh_hant --> in the topmost links of the result lists for
    # TW and HK you should find a wiktionary.org zh_hant link.  In the result
    # list for zh-CN there should be no zh_hant link; instead you should find
    # zh.m.wikipedia.org/zh somewhere near the top.

    # '!go 日 :zh-TW' --> https://zh.m.wiktionary.org/zh-hant/%E6%97%A5
    # '!go 日 :zh-CN' --> https://zh.m.wikipedia.org/zh/%E6%97%A5

    ret_val['language'] = eng_lang
    ret_val['country'] = country
    ret_val['locale'] = locale
    ret_val['subdomain'] = eng_traits.custom['supported_domains'].get(country.upper(), 'www.google.com')

    # hl parameter:
    #   The hl parameter specifies the interface language (host language) of
    #   your user interface.  To improve the performance and the quality of your
    #   search results, you are strongly encouraged to set this parameter
    #   explicitly.
    #   https://developers.google.com/custom-search/docs/xml_results#hlsp
    # The Interface Language:
    #   https://developers.google.com/custom-search/docs/xml_results_appendices#interfaceLanguages
    # https://github.com/searxng/searxng/issues/2515#issuecomment-1607150817

    ret_val['params']['hl'] = f'{lang_code}-{country}'

    # lr parameter:
    #   The lr (language restrict) parameter restricts search results to
    #   documents written in a particular language.
    #   https://developers.google.com/custom-search/docs/xml_results#lrsp
    #   Language Collection Values:
    #   https://developers.google.com/custom-search/docs/xml_results_appendices#languageCollections
    #
    #   To select 'all' languages an empty 'lr' value is used.
    #
    #   Unlike other google services, Google Scholar supports selecting more
    #   than one language.  The languages are separated by a pipe '|' (logical OR).
    #   For example: &lr=lang_zh-TW%7Clang_de selects articles written in
    #   traditional Chinese OR German.

    ret_val['params']['lr'] = eng_lang
    if sxng_locale == 'all':
        ret_val['params']['lr'] = ''

    # cr parameter:
    #   The cr parameter restricts search results to documents originating in a
    #   particular country.
    #   https://developers.google.com/custom-search/docs/xml_results#crsp

    # specify a region (country) only if a region is given in the selected
    # locale --> https://github.com/searxng/searxng/issues/2672
    ret_val['params']['cr'] = ''
    if len(sxng_locale.split('-')) > 1:
        ret_val['params']['cr'] = 'country' + country

    # gl parameter: (mandatory by Google News)
    #   The gl parameter value is a two-letter country code.  For WebSearch
    #   results, the gl parameter boosts search results whose country of origin
    #   matches the parameter value.  See the Country Codes section for a list of
    #   valid values.
    #   Specifying a gl parameter value in WebSearch requests should improve the
    #   relevance of results.  This is particularly true for international
    #   customers and, even more specifically, for customers in English-speaking
    #   countries other than the United States.
    #   https://developers.google.com/custom-search/docs/xml_results#glsp
    #   https://github.com/searxng/searxng/issues/2515#issuecomment-1606294635
    # ret_val['params']['gl'] = country

    # ie parameter:
    #   The ie parameter sets the character encoding scheme that should be used
    #   to interpret the query string.  The default ie value is latin1.
    #   https://developers.google.com/custom-search/docs/xml_results#iesp
    ret_val['params']['ie'] = 'utf8'

    # oe parameter:
    #   The oe parameter sets the character encoding scheme that should be used
    #   to decode the XML result.  The default oe value is latin1.
    #   https://developers.google.com/custom-search/docs/xml_results#oesp
    ret_val['params']['oe'] = 'utf8'

    # num parameter:
    #   The num parameter identifies the number of search results to return.
    #   The default num value is 10, and the maximum value is 20.  If you request
    #   more than 20 results, only 20 results will be returned.
    #   https://developers.google.com/custom-search/docs/xml_results#numsp
    # HINT: seems to have no effect (tested in google WEB & Images)
    # ret_val['params']['num'] = 20

    # HTTP headers
    ret_val['headers']['Accept'] = '*/*'

    # Cookies
    # - https://github.com/searxng/searxng/pull/1679#issuecomment-1235432746
    # - https://github.com/searxng/searxng/issues/1555
    ret_val['cookies']['CONSENT'] = "YES+"

    return ret_val
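
# Illustrative only: the concrete values depend on the traits fetched by
# fetch_traits().  For a 'de-DE' request one would roughly expect:
#
#   get_google_info({'searxng_locale': 'de-DE'}, traits) -->
#   {
#       'language': 'lang_de',
#       'country': 'DE',
#       'locale': babel.Locale('de', territory='DE'),
#       'subdomain': 'www.google.de',
#       'params': {'hl': 'de-DE', 'lr': 'lang_de', 'cr': 'countryDE', 'ie': 'utf8', 'oe': 'utf8'},
#       'headers': {'Accept': '*/*'},
#       'cookies': {'CONSENT': 'YES+'},
#   }
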


def detect_google_sorry(resp):
    if resp.url.host == 'sorry.google.com' or resp.url.path.startswith('/sorry'):
        raise SearxEngineCaptchaException()


def request(query, params):
    """Google search request"""
    # pylint: disable=line-too-long
    start = (params['pageno'] - 1) * 10
    str_async = ui_async(start)
    google_info = get_google_info(params, traits)
    logger.debug("ARC_ID: %s", str_async)

    # https://www.google.de/search?q=corona&hl=de&lr=lang_de&start=0&tbs=qdr%3Ad&safe=medium
    query_url = (
        'https://'
        + google_info['subdomain']
        + '/search'
        + "?"
        + urlencode(
            {
                'q': query,
                **google_info['params'],
                'filter': '0',
                'start': start,
                # 'vet': '12ahUKEwik3ZbIzfn7AhXMX_EDHbUDBh0QxK8CegQIARAC..i',
                # 'ved': '2ahUKEwik3ZbIzfn7AhXMX_EDHbUDBh0Q_skCegQIARAG',
                # 'cs' : 1,
                # 'sa': 'N',
                # 'yv': 3,
                # 'prmd': 'vin',
                # 'ei': 'GASaY6TxOcy_xc8PtYeY6AE',
                # 'sa': 'N',
                # 'sstk': 'AcOHfVkD7sWCSAheZi-0tx_09XDO55gTWY0JNq3_V26cNN-c8lfD45aZYPI8s_Bqp8s57AHz5pxchDtAGCA_cikAWSjy9kw3kgg'
                # formerly known as use_mobile_ui
                'asearch': 'arc',
                'async': str_async,
            }
        )
    )

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
    params['url'] = query_url

    params['cookies'] = google_info['cookies']
    params['headers'].update(google_info['headers'])
    return params
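
# Illustrative only: for the query "corona", pageno=2 and a 'de-DE' locale the
# composed URL looks roughly like (URL-encoding of the async value omitted)
#
#   https://www.google.de/search?q=corona&hl=de-DE&lr=lang_de&cr=countryDE
#       &ie=utf8&oe=utf8&filter=0&start=10&asearch=arc
#       &async=arc_id:srp_<random>_110,use_ac:true,_fmt:prog
#
# plus optional &tbs=qdr:* (time range) and &safe=* (safesearch) parameters.
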


# =26;[3,"dimg_ZNMiZPCqE4apxc8P3a2tuAQ_137"]a87;data:image/jpeg;base64,/9j/4AAQSkZJRgABA
# ...6T+9Nl4cnD+gr9OK8I56/tX3l86nWYw//2Q==26;
RE_DATA_IMAGE = re.compile(r'"(dimg_[^"]*)"[^;]*;(data:image[^;]*;[^;]*);')
RE_DATA_IMAGE_end = re.compile(r'"(dimg_[^"]*)"[^;]*;(data:image[^;]*;[^;]*)$')


def parse_data_images(text: str):
    data_image_map = {}

    for img_id, data_image in RE_DATA_IMAGE.findall(text):
        end_pos = data_image.rfind('=')
        if end_pos > 0:
            data_image = data_image[: end_pos + 1]
        data_image_map[img_id] = data_image
    last = RE_DATA_IMAGE_end.search(text)
    if last:
        data_image_map[last.group(1)] = last.group(2)
    logger.debug('data:image objects --> %s', list(data_image_map.keys()))
    return data_image_map
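
# Illustrative only: for a response fragment such as
#
#   [3,"dimg_1"]a87;data:image/jpeg;base64,AAAA==26;
#
# parse_data_images() maps 'dimg_1' to 'data:image/jpeg;base64,AAAA==' -- the
# trailing counter after the last '=' is stripped by the rfind('=') step above.
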


def response(resp) -> EngineResults:
    """Get response from google's search request"""
    # pylint: disable=too-many-branches, too-many-statements
    detect_google_sorry(resp)
    data_image_map = parse_data_images(resp.text)

    results = EngineResults()

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # results --> answer
    answer_list = eval_xpath(dom, '//div[contains(@class, "LGOjhe")]')
    for item in answer_list:
        for bubble in eval_xpath(item, './/div[@class="nnFGuf"]'):
            bubble.drop_tree()
        results.add(
            results.types.Answer(
                answer=extract_text(item),
                url=(eval_xpath(item, '../..//a/@href') + [None])[0],
            )
        )

    # parse results
    for result in eval_xpath_list(dom, './/div[contains(@jscontroller, "SC7lYd")]'):
        # pylint: disable=too-many-nested-blocks

        try:
            title_tag = eval_xpath_getindex(result, './/a/h3[1]', 0, default=None)
            if title_tag is None:
                # this is not one of the common google result *sections*
                logger.debug('ignoring item from the result_xpath list: missing title')
                continue
            title = extract_text(title_tag)

            url = eval_xpath_getindex(result, './/a[h3]/@href', 0, None)
            if url is None:
                logger.debug('ignoring item from the result_xpath list: missing url of title "%s"', title)
                continue

            content_nodes = eval_xpath(result, './/div[contains(@data-sncf, "1")]')
            for item in content_nodes:
                for script in item.xpath(".//script"):
                    script.getparent().remove(script)

            content = extract_text(content_nodes)

            if not content:
                logger.debug('ignoring item from the result_xpath list: missing content of title "%s"', title)
                continue

            thumbnail = content_nodes[0].xpath('.//img/@src')
            if thumbnail:
                thumbnail = thumbnail[0]
                if thumbnail.startswith('data:image'):
                    img_id = content_nodes[0].xpath('.//img/@id')
                    if img_id:
                        thumbnail = data_image_map.get(img_id[0])
            else:
                thumbnail = None

            results.append({'url': url, 'title': title, 'content': content, 'thumbnail': thumbnail})

        except Exception as e:  # pylint: disable=broad-except
            logger.error(e, exc_info=True)
            continue

    # parse suggestion
    for suggestion in eval_xpath_list(dom, suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    # return results
    return results


# get supported languages from their site


skip_countries = [
    # official language of google-country not in google-languages
    'AL',  # Albania (sq)
    'AZ',  # Azerbaijan (az)
    'BD',  # Bangladesh (bn)
    'BN',  # Brunei Darussalam (ms)
    'BT',  # Bhutan (dz)
    'ET',  # Ethiopia (am)
    'GE',  # Georgia (ka, os)
    'GL',  # Greenland (kl)
    'KH',  # Cambodia (km)
    'LA',  # Laos (lo)
    'LK',  # Sri Lanka (si, ta)
    'ME',  # Montenegro (sr)
    'MK',  # North Macedonia (mk, sq)
    'MM',  # Myanmar (my)
    'MN',  # Mongolia (mn)
    'MV',  # Maldives (dv) // dv_MV is unknown by babel
    'MY',  # Malaysia (ms)
    'NP',  # Nepal (ne)
    'TJ',  # Tajikistan (tg)
    'TM',  # Turkmenistan (tk)
    'UZ',  # Uzbekistan (uz)
]


def fetch_traits(engine_traits: EngineTraits, add_domains: bool = True):
    """Fetch languages from Google."""
    # pylint: disable=import-outside-toplevel, too-many-branches

    engine_traits.custom['supported_domains'] = {}

    resp = get('https://www.google.com/preferences')
    if not resp.ok:  # type: ignore
        raise RuntimeError("Response from Google's preferences is not OK.")

    dom = html.fromstring(resp.text.replace('<?xml version="1.0" encoding="UTF-8"?>', ''))

    # supported language codes

    lang_map = {'no': 'nb'}
    for x in eval_xpath_list(dom, "//select[@name='hl']/option"):

        eng_lang = x.get("value")
        try:
            locale = babel.Locale.parse(lang_map.get(eng_lang, eng_lang), sep='-')
        except babel.UnknownLocaleError:
            print("INFO: google UI language %s (%s) is unknown by babel" % (eng_lang, x.text.split("(")[0].strip()))
            continue
        sxng_lang = language_tag(locale)

        conflict = engine_traits.languages.get(sxng_lang)
        if conflict:
            if conflict != eng_lang:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_lang, conflict, eng_lang))
            continue
        engine_traits.languages[sxng_lang] = 'lang_' + eng_lang

    # alias languages
    engine_traits.languages['zh'] = 'lang_zh-CN'

    # supported region codes

    for x in eval_xpath_list(dom, "//select[@name='gl']/option"):
        eng_country = x.get("value")

        if eng_country in skip_countries:
            continue
        if eng_country == 'ZZ':
            engine_traits.all_locale = 'ZZ'
            continue

        sxng_locales = get_official_locales(eng_country, engine_traits.languages.keys(), regional=True)

        if not sxng_locales:
            print("ERROR: can't map from google country %s (%s) to a babel region." % (x.get('data-name'), eng_country))
            continue

        for sxng_locale in sxng_locales:
            engine_traits.regions[region_tag(sxng_locale)] = eng_country

    # alias regions
    engine_traits.regions['zh-CN'] = 'HK'

    # supported domains
    if add_domains:
        resp = get('https://www.google.com/supported_domains')
        if not resp.ok:  # type: ignore
            raise RuntimeError("Response from https://www.google.com/supported_domains is not OK.")

        for domain in resp.text.split():  # type: ignore
            domain = domain.strip()
            if not domain or domain in [
                '.google.com',
            ]:
                continue
            region = domain.split('.')[-1].upper()
            engine_traits.custom['supported_domains'][region] = 'www' + domain  # type: ignore
            if region == 'HK':
                # There is no google.cn, we use .com.hk for zh-CN
                engine_traits.custom['supported_domains']['CN'] = 'www' + domain  # type: ignore