
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Google (Web)

For a detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.

.. _Query Parameter Definitions:
   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions

"""

# pylint: disable=invalid-name, missing-function-docstring

from urllib.parse import urlencode
from lxml import html

from searx import logger
from searx.utils import match_language, extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
from searx.exceptions import SearxEngineCaptchaException

logger = logger.getChild('google engine')

# about
about = {
    "website": 'https://www.google.com',
    "wikidata_id": 'Q9366',
    "official_api_documentation": 'https://developers.google.com/custom-search/',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general']
paging = True
time_range_support = True
safesearch = True
use_mobile_ui = False
supported_languages_url = 'https://www.google.com/preferences?#languages'

# based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
google_domains = {
    'BG': 'google.bg',      # Bulgaria
    'CZ': 'google.cz',      # Czech Republic
    'DE': 'google.de',      # Germany
    'DK': 'google.dk',      # Denmark
    'AT': 'google.at',      # Austria
    'CH': 'google.ch',      # Switzerland
    'GR': 'google.gr',      # Greece
    'AU': 'google.com.au',  # Australia
    'CA': 'google.ca',      # Canada
    'GB': 'google.co.uk',   # United Kingdom
    'ID': 'google.co.id',   # Indonesia
    'IE': 'google.ie',      # Ireland
    'IN': 'google.co.in',   # India
    'MY': 'google.com.my',  # Malaysia
    'NZ': 'google.co.nz',   # New Zealand
    'PH': 'google.com.ph',  # Philippines
    'SG': 'google.com.sg',  # Singapore
    'US': 'google.com',     # United States (google.us) redirects to .com
    'ZA': 'google.co.za',   # South Africa
    'AR': 'google.com.ar',  # Argentina
    'CL': 'google.cl',      # Chile
    'ES': 'google.es',      # Spain
    'MX': 'google.com.mx',  # Mexico
    'EE': 'google.ee',      # Estonia
    'FI': 'google.fi',      # Finland
    'BE': 'google.be',      # Belgium
    'FR': 'google.fr',      # France
    'IL': 'google.co.il',   # Israel
    'HR': 'google.hr',      # Croatia
    'HU': 'google.hu',      # Hungary
    'IT': 'google.it',      # Italy
    'JP': 'google.co.jp',   # Japan
    'KR': 'google.co.kr',   # South Korea
    'LT': 'google.lt',      # Lithuania
    'LV': 'google.lv',      # Latvia
    'NO': 'google.no',      # Norway
    'NL': 'google.nl',      # Netherlands
    'PL': 'google.pl',      # Poland
    'BR': 'google.com.br',  # Brazil
    'PT': 'google.pt',      # Portugal
    'RO': 'google.ro',      # Romania
    'RU': 'google.ru',      # Russia
    'SK': 'google.sk',      # Slovakia
    'SI': 'google.si',      # Slovenia
    'SE': 'google.se',      # Sweden
    'TH': 'google.co.th',   # Thailand
    'TR': 'google.com.tr',  # Turkey
    'UA': 'google.com.ua',  # Ukraine
    'CN': 'google.com.hk',  # There is no google.cn, we use .com.hk for zh-CN
    'HK': 'google.com.hk',  # Hong Kong
    'TW': 'google.com.tw'   # Taiwan
}

time_range_dict = {
    'day': 'd',
    'week': 'w',
    'month': 'm',
    'year': 'y'
}

# Filter results. 0: None, 1: Moderate, 2: Strict
filter_mapping = {
    0: 'off',
    1: 'medium',
    2: 'high'
}

# specific xpath variables
# ------------------------

# google results are grouped into <div class="g" ../>
results_xpath = '//div[@class="g"]'

# google *sections* are not usual *results*, we ignore them
g_section_with_header = './g-section-with-header'

# the title is a h3 tag relative to the result group
title_xpath = './/h3[1]'

# in the result group there is a <div class="yuRUbf" ../> whose first child is
# an <a href=...>
href_xpath = './/div[@class="yuRUbf"]//a/@href'

# in the result group there is a <div class="IsZvec" ../> containing the *content*
content_xpath = './/div[@class="IsZvec"]'

# Suggestions are links placed in a *card-section*; we extract only the text
# from the links, not the links themselves.
suggestion_xpath = '//div[contains(@class, "card-section")]//a'

# Since google does *auto-correction* on the first query, these are not really
# *spelling suggestions*; we use them anyway.
spelling_suggestion_xpath = '//div[@class="med"]/p/a'
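
# For orientation only: the markup of a single result group that the xpath
# expressions above assume looks roughly like the sketch below.  This is an
# illustration, not a guarantee -- class names and nesting on Google's result
# pages change frequently.
#
#   <div class="g">
#     <h3> result title </h3>                            <- title_xpath
#     <div class="yuRUbf">
#       <a href="https://example.org/..."> ... </a>      <- href_xpath
#     </div>
#     <div class="IsZvec"> snippet / content text </div> <- content_xpath
#   </div>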

def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
    """Composing various language properties for the google engines.

    This function is called by the various google engines (google itself,
    google-images, -news, -scholar, -videos).

    :param dict params: request parameters of the engine
    :param list lang_list: list of supported languages of the engine
        :py:obj:`ENGINES_LANGUAGES[engine-name] <searx.data.ENGINES_LANGUAGES>`
    :param dict custom_aliases: custom aliases for non-standard language codes
        (used when calling :py:func:`searx.utils.match_language`)
    :param bool supported_any_language: When a language is not specified, the
        language interpretation is left up to Google to decide how the search
        results should be delivered.  This argument is ``True`` for the google
        engine and ``False`` for the other engines (google-images, -news,
        -scholar, -videos).
    :rtype: dict
    :returns:
        Dictionary with the key/value pairs:

        language:
            Return value from :py:func:`searx.utils.match_language`

        country:
            The country code (e.g. US, AT, CA, FR, DE ..)

        subdomain:
            Google subdomain :py:obj:`google_domains` that fits to the country
            code.

        params:
            Dictionary with additional request arguments (can be passed to
            :py:func:`urllib.parse.urlencode`).

        headers:
            Dictionary with additional HTTP headers (can be passed to the
            request's headers)
    """

    ret_val = {
        'language': None,
        'country': None,
        'subdomain': None,
        'params': {},
        'headers': {},
    }

    # language ...

    _lang = params['language']
    _any_language = _lang.lower() == 'all'
    if _any_language:
        _lang = 'en-US'
    language = match_language(_lang, lang_list, custom_aliases)
    ret_val['language'] = language

    # country ...

    _l = _lang.split('-')
    if len(_l) == 2:
        country = _l[1]
    else:
        country = _l[0].upper()
        if country == 'EN':
            country = 'US'
    ret_val['country'] = country

    # subdomain ...

    ret_val['subdomain'] = 'www.' + google_domains.get(country.upper(), 'google.com')

    # params & headers

    lang_country = '%s-%s' % (language, country)  # (en-US, en-EN, de-DE, de-AU, fr-FR ..)

    # hl parameter:
    #   https://developers.google.com/custom-search/docs/xml_results#hlsp
    # The Interface Language:
    #   https://developers.google.com/custom-search/docs/xml_results_appendices#interfaceLanguages

    ret_val['params']['hl'] = lang_list.get(lang_country, language)

    # lr parameter:
    #   The lr (language restrict) parameter restricts search results to
    #   documents written in a particular language.
    #   https://developers.google.com/custom-search/docs/xml_results#lrsp
    # Language Collection Values:
    #   https://developers.google.com/custom-search/docs/xml_results_appendices#languageCollections

    if _any_language and supported_any_language:
        # interpretation is left up to Google (based on whoogle)
        #
        # - add parameter ``source=lnt``
        # - don't use the ``lr`` parameter
        # - don't add an ``Accept-Language`` HTTP header
        ret_val['params']['source'] = 'lnt'

    else:
        # restricts search results to documents written in a particular
        # language.
        ret_val['params']['lr'] = "lang_" + lang_list.get(lang_country, language)

        # Accept-Language: fr-CH, fr;q=0.8, en;q=0.6, *;q=0.5
        ret_val['headers']['Accept-Language'] = ','.join([
            lang_country,
            language + ';q=0.8',
            'en;q=0.6',
            '*;q=0.5',
        ])

    return ret_val
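
# Illustrative example (an assumption for orientation, not executed code): for
# params['language'] == 'fr-CH' the function returns a dictionary roughly like
#
#   {
#       'language': 'fr',
#       'country': 'CH',
#       'subdomain': 'www.google.ch',
#       'params': {'hl': 'fr', 'lr': 'lang_fr'},
#       'headers': {'Accept-Language': 'fr-CH,fr;q=0.8,en;q=0.6,*;q=0.5'},
#   }
#
# The exact 'language', 'hl' and 'lr' values depend on the engine's supported
# language list and custom aliases.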

def detect_google_sorry(resp):
    if resp.url.host == 'sorry.google.com' or resp.url.path.startswith('/sorry'):
        raise SearxEngineCaptchaException()

def request(query, params):
    """Google search request"""

    offset = (params['pageno'] - 1) * 10

    lang_info = get_lang_info(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases, True
    )

    additional_parameters = {}
    if use_mobile_ui:
        additional_parameters = {
            'asearch': "arc",
            'async': 'arc_id:srp_510,ffilt:all,ve_name:MoreResultsContainer,next_id:srp_5,use_ac:true,_id:arc-srp_510,_pms:qs,_fmt:pc'  # pylint: disable=line-too-long
        }

    # https://www.google.de/search?q=corona&hl=de&lr=lang_de&start=0&tbs=qdr%3Ad&safe=medium
    query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
        'q': query,
        **lang_info['params'],
        'ie': "utf8",
        'oe': "utf8",
        'start': offset,
        **additional_parameters,
    })

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
    params['url'] = query_url

    params['headers'].update(lang_info['headers'])
    if use_mobile_ui:
        params['headers']['Accept'] = '*/*'
    else:
        params['headers']['Accept'] = (
            'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
        )

    return params

def response(resp):
    """Get response from google's search request"""

    detect_google_sorry(resp)

    results = []

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # results --> answer
    answer = eval_xpath(dom, '//div[contains(@class, "LGOjhe")]//text()')
    if answer:
        results.append({'answer': ' '.join(answer)})
    else:
        logger.debug("did not find 'answer'")

    # results --> number_of_results
    if not use_mobile_ui:
        try:
            _txt = eval_xpath_getindex(dom, '//div[@id="result-stats"]//text()', 0)
            _digit = ''.join([n for n in _txt if n.isdigit()])
            number_of_results = int(_digit)
            results.append({'number_of_results': number_of_results})
        except Exception as e:  # pylint: disable=broad-except
            logger.debug("did not find 'number_of_results'")
            logger.error(e, exc_info=True)

    # parse results
    for result in eval_xpath_list(dom, results_xpath):

        # google *sections*
        if extract_text(eval_xpath(result, g_section_with_header)):
            logger.debug("ignoring <g-section-with-header>")
            continue

        try:
            title_tag = eval_xpath_getindex(result, title_xpath, 0, default=None)
            if title_tag is None:
                # this is not one of the common google result *sections*
                logger.debug('ignoring <div class="g" ../> section: missing title')
                continue
            title = extract_text(title_tag)
            url = eval_xpath_getindex(result, href_xpath, 0, None)
            if url is None:
                continue
            content = extract_text(eval_xpath_getindex(result, content_xpath, 0, default=None), allow_none=True)
            results.append({
                'url': url,
                'title': title,
                'content': content
            })
        except Exception as e:  # pylint: disable=broad-except
            logger.error(e, exc_info=True)
            # from lxml import etree
            # logger.debug(etree.tostring(result, pretty_print=True))
            # import pdb
            # pdb.set_trace()
            continue

    # parse suggestion
    for suggestion in eval_xpath_list(dom, suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    for correction in eval_xpath_list(dom, spelling_suggestion_xpath):
        results.append({'correction': extract_text(correction)})

    # return results
    return results

# get supported languages from their site
def _fetch_supported_languages(resp):
    ret_val = {}
    dom = html.fromstring(resp.text)

    radio_buttons = eval_xpath_list(dom, '//*[@id="langSec"]//input[@name="lr"]')

    for x in radio_buttons:
        name = x.get("data-name")
        code = x.get("value").split('_')[-1]
        ret_val[code] = {"name": name}

    return ret_val
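
# For orientation (an illustrative assumption, the preferences page markup may
# change): each radio button carries a value like "lang_en" and a data-name
# like "English", so the function returns a mapping roughly of the form
#
#   {'en': {'name': 'English'}, ...}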