Merge branch 'google-images-fix'

Adam Tauber 4 years ago
parent commit 9f5cd28dba
2 changed files with 390 additions and 386 deletions
  1. searx/engines/google.py (+213 -317)
  2. searx/engines/google_images.py (+177 -69)

+ 213 - 317
searx/engines/google.py

@@ -1,210 +1,207 @@
-#  Google (Web)
-#
-# @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/custom-search/)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, content, suggestion
-
-import re
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""Google (Web)
+
+:website:     https://www.google.com
+:provide-api: yes (https://developers.google.com/custom-search/)
+:using-api:   not the official one, since it requires registration with another service
+:results:     HTML
+:stable:      no
+:parse:       url, title, content, number_of_results, answer, suggestion, correction
+
+For a detailed description of the *REST-full* API see: `Query Parameter
+Definitions`_.
+
+.. _Query Parameter Definitions:
+   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
+
+"""
+
+# pylint: disable=invalid-name, missing-function-docstring
+
+from lxml import html
 from flask_babel import gettext
-from lxml import html, etree
-from searx.engines.xpath import extract_text, extract_url
+from searx.engines.xpath import extract_text
 from searx import logger
-from searx.url_utils import urlencode, urlparse, parse_qsl
+from searx.url_utils import urlencode, urlparse
 from searx.utils import match_language, eval_xpath

 logger = logger.getChild('google engine')

-
 # engine dependent config
 categories = ['general']
 paging = True
 language_support = True
-use_locale_domain = True
 time_range_support = True
+safesearch = True
+supported_languages_url = 'https://www.google.com/preferences?#languages'

 # based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
-default_hostname = 'www.google.com'
-
-country_to_hostname = {
-    'BG': 'www.google.bg',  # Bulgaria
-    'CZ': 'www.google.cz',  # Czech Republic
-    'DE': 'www.google.de',  # Germany
-    'DK': 'www.google.dk',  # Denmark
-    'AT': 'www.google.at',  # Austria
-    'CH': 'www.google.ch',  # Switzerland
-    'GR': 'www.google.gr',  # Greece
-    'AU': 'www.google.com.au',  # Australia
-    'CA': 'www.google.ca',  # Canada
-    'GB': 'www.google.co.uk',  # United Kingdom
-    'ID': 'www.google.co.id',  # Indonesia
-    'IE': 'www.google.ie',  # Ireland
-    'IN': 'www.google.co.in',  # India
-    'MY': 'www.google.com.my',  # Malaysia
-    'NZ': 'www.google.co.nz',  # New Zealand
-    'PH': 'www.google.com.ph',  # Philippines
-    'SG': 'www.google.com.sg',  # Singapore
-    # 'US': 'www.google.us',  # United States, redirect to .com
-    'ZA': 'www.google.co.za',  # South Africa
-    'AR': 'www.google.com.ar',  # Argentina
-    'CL': 'www.google.cl',  # Chile
-    'ES': 'www.google.es',  # Spain
-    'MX': 'www.google.com.mx',  # Mexico
-    'EE': 'www.google.ee',  # Estonia
-    'FI': 'www.google.fi',  # Finland
-    'BE': 'www.google.be',  # Belgium
-    'FR': 'www.google.fr',  # France
-    'IL': 'www.google.co.il',  # Israel
-    'HR': 'www.google.hr',  # Croatia
-    'HU': 'www.google.hu',  # Hungary
-    'IT': 'www.google.it',  # Italy
-    'JP': 'www.google.co.jp',  # Japan
-    'KR': 'www.google.co.kr',  # South Korea
-    'LT': 'www.google.lt',  # Lithuania
-    'LV': 'www.google.lv',  # Latvia
-    'NO': 'www.google.no',  # Norway
-    'NL': 'www.google.nl',  # Netherlands
-    'PL': 'www.google.pl',  # Poland
-    'BR': 'www.google.com.br',  # Brazil
-    'PT': 'www.google.pt',  # Portugal
-    'RO': 'www.google.ro',  # Romania
-    'RU': 'www.google.ru',  # Russia
-    'SK': 'www.google.sk',  # Slovakia
-    'SI': 'www.google.si',  # Slovenia
-    'SE': 'www.google.se',  # Sweden
-    'TH': 'www.google.co.th',  # Thailand
-    'TR': 'www.google.com.tr',  # Turkey
-    'UA': 'www.google.com.ua',  # Ukraine
-    # 'CN': 'www.google.cn',  # China, only from China ?
-    'HK': 'www.google.com.hk',  # Hong Kong
-    'TW': 'www.google.com.tw'  # Taiwan
+google_domains = {
+    'BG': 'google.bg',      # Bulgaria
+    'CZ': 'google.cz',      # Czech Republic
+    'DE': 'google.de',      # Germany
+    'DK': 'google.dk',      # Denmark
+    'AT': 'google.at',      # Austria
+    'CH': 'google.ch',      # Switzerland
+    'GR': 'google.gr',      # Greece
+    'AU': 'google.com.au',  # Australia
+    'CA': 'google.ca',      # Canada
+    'GB': 'google.co.uk',   # United Kingdom
+    'ID': 'google.co.id',   # Indonesia
+    'IE': 'google.ie',      # Ireland
+    'IN': 'google.co.in',   # India
+    'MY': 'google.com.my',  # Malaysia
+    'NZ': 'google.co.nz',   # New Zealand
+    'PH': 'google.com.ph',  # Philippines
+    'SG': 'google.com.sg',  # Singapore
+    # 'US': 'google.us',    # United States, redirect to .com
+    'ZA': 'google.co.za',   # South Africa
+    'AR': 'google.com.ar',  # Argentina
+    'CL': 'google.cl',      # Chile
+    'ES': 'google.es',      # Spain
+    'MX': 'google.com.mx',  # Mexico
+    'EE': 'google.ee',      # Estonia
+    'FI': 'google.fi',      # Finland
+    'BE': 'google.be',      # Belgium
+    'FR': 'google.fr',      # France
+    'IL': 'google.co.il',   # Israel
+    'HR': 'google.hr',      # Croatia
+    'HU': 'google.hu',      # Hungary
+    'IT': 'google.it',      # Italy
+    'JP': 'google.co.jp',   # Japan
+    'KR': 'google.co.kr',   # South Korea
+    'LT': 'google.lt',      # Lithuania
+    'LV': 'google.lv',      # Latvia
+    'NO': 'google.no',      # Norway
+    'NL': 'google.nl',      # Netherlands
+    'PL': 'google.pl',      # Poland
+    'BR': 'google.com.br',  # Brazil
+    'PT': 'google.pt',      # Portugal
+    'RO': 'google.ro',      # Romania
+    'RU': 'google.ru',      # Russia
+    'SK': 'google.sk',      # Slovakia
+    'SI': 'google.si',      # Slovenia
+    'SE': 'google.se',      # Sweden
+    'TH': 'google.co.th',   # Thailand
+    'TR': 'google.com.tr',  # Turkey
+    'UA': 'google.com.ua',  # Ukraine
+    # 'CN': 'google.cn',    # China, only from China ?
+    'HK': 'google.com.hk',  # Hong Kong
+    'TW': 'google.com.tw'   # Taiwan
 }

-# osm
-url_map = 'https://www.openstreetmap.org/'\
-    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
-
-# search-url
-search_path = '/search'
-search_url = ('https://{hostname}' +
-              search_path +
-              '?{query}&start={offset}&gws_rd=cr&gbv=1&lr={lang}&hl={lang_short}&ei=x')
-
-time_range_search = "&tbs=qdr:{range}"
-time_range_dict = {'day': 'd',
-                   'week': 'w',
-                   'month': 'm',
-                   'year': 'y'}
-
-# other URLs
-map_hostname_start = 'maps.google.'
-maps_path = '/maps'
-redirect_path = '/url'
-images_path = '/images'
-supported_languages_url = 'https://www.google.com/preferences?#languages'
+time_range_dict = {
+    'day': 'd',
+    'week': 'w',
+    'month': 'm',
+    'year': 'y'
+}
+
+# Filter results. 0: None, 1: Moderate, 2: Strict
+filter_mapping = {
+    0 : 'off',
+    1 : 'medium',
+    2 : 'high'
+}
 
 
 # specific xpath variables
-results_xpath = '//div[contains(@class, "ZINbbc")]'
-url_xpath = './/div[@class="kCrYT"][1]/a/@href'
-title_xpath = './/div[@class="kCrYT"][1]/a/div[1]'
-content_xpath = './/div[@class="kCrYT"][2]//div[contains(@class, "BNeawe")]//div[contains(@class, "BNeawe")]'
-suggestion_xpath = '//div[contains(@class, "ZINbbc")][last()]//div[@class="rVLSBd"]/a//div[contains(@class, "BNeawe")]'
-spelling_suggestion_xpath = '//div[@id="scc"]//a'
-
-# map : detail location
-map_address_xpath = './/div[@class="s"]//table//td[2]/span/text()'
-map_phone_xpath = './/div[@class="s"]//table//td[2]/span/span'
-map_website_url_xpath = 'h3[2]/a/@href'
-map_website_title_xpath = 'h3[2]'
-
-# map : near the location
-map_near = 'table[@class="ts"]//tr'
-map_near_title = './/h4'
-map_near_url = './/h4/a/@href'
-map_near_phone = './/span[@class="nobr"]'
-
-# images
-images_xpath = './/div/a'
-image_url_xpath = './@href'
-image_img_src_xpath = './img/@src'
-
-# property names
-# FIXME : no translation
-property_address = "Address"
-property_phone = "Phone number"
-
-
-# remove google-specific tracking-url
-def parse_url(url_string, google_hostname):
-    # sanity check
-    if url_string is None:
-        return url_string
-
-    # normal case
-    parsed_url = urlparse(url_string)
-    if (parsed_url.netloc in [google_hostname, '']
-            and parsed_url.path == redirect_path):
-        query = dict(parse_qsl(parsed_url.query))
-        return query['q']
-    else:
-        return url_string
+# ------------------------
+
+# google results are grouped into <div class="g" ../>
+results_xpath = '//div[@class="g"]'
+
+# google *sections* are not usual *results*; we ignore them
+g_section_with_header = './g-section-with-header'
+
+# the title is a h3 tag relative to the result group
+title_xpath = './/h3[1]'
 
 
+# in the result group there is a <div class="r" ../> whose first child is an <a
+# href=...> (in some results, the <a> is the first "descendant", not "child")
+href_xpath = './/div[@class="r"]//a/@href'
+
+# in the result group there is a <div class="s" ../> containing the *content*
+content_xpath = './/div[@class="s"]'
+
+# Suggestions are links placed in a *card-section*; we extract only the text
+# of the links, not the links themselves.
+suggestion_xpath = '//div[contains(@class, "card-section")]//a'
+
+# Since google does *auto-correction* on the first query, these are not really
+# *spelling suggestions*; we use them anyway.
+spelling_suggestion_xpath = '//div[@class="med"]/p/a'
 
 
-# returns extract_text on the first result selected by the xpath or None
 def extract_text_from_dom(result, xpath):
+    """returns extract_text on the first result selected by the xpath or None"""
     r = eval_xpath(result, xpath)
     if len(r) > 0:
         return extract_text(r[0])
     return None
 
 
-
-# do search-request
-def request(query, params):
-    offset = (params['pageno'] - 1) * 10
-
-    if params['language'] == 'all' or params['language'] == 'en-US':
-        language = 'en-GB'
-    else:
-        language = match_language(params['language'], supported_languages, language_aliases)
+def get_lang_country(params, lang_list, custom_aliases):
+    """Returns a tuple with *langauage* on its first and *country* on its second
+    position."""
+    language = params['language']
+    if language == 'all':
+        language = 'en-US'
 
 
     language_array = language.split('-')
-    if params['language'].find('-') > 0:
-        country = params['language'].split('-')[1]
-    elif len(language_array) == 2:
+
+    if len(language_array) == 2:
         country = language_array[1]
     else:
-        country = 'US'
+        country = language_array[0].upper()
 
 
-    url_lang = 'lang_' + language
+    language = match_language(language, lang_list, custom_aliases)
+    lang_country = '%s-%s' % (language, country)
+    if lang_country == 'en-EN':
+        lang_country = 'en'
 
 
-    if use_locale_domain:
-        google_hostname = country_to_hostname.get(country.upper(), default_hostname)
-    else:
-        google_hostname = default_hostname
-
-    # original format: ID=3e2b6616cee08557:TM=5556667580:C=r:IP=4.1.12.5-:S=23ASdf0soFgF2d34dfgf-_22JJOmHdfgg
-    params['cookies']['GOOGLE_ABUSE_EXEMPTION'] = 'x'
-    params['url'] = search_url.format(offset=offset,
-                                      query=urlencode({'q': query}),
-                                      hostname=google_hostname,
-                                      lang=url_lang,
-                                      lang_short=language)
-    if params['time_range'] in time_range_dict:
-        params['url'] += time_range_search.format(range=time_range_dict[params['time_range']])
+    return language, country, lang_country
 
 
-    params['headers']['Accept-Language'] = language + ',' + language + '-' + country
-    params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
+def request(query, params):
+    """Google search request"""
 
 
-    params['google_hostname'] = google_hostname
+    offset = (params['pageno'] - 1) * 10
+    language, country, lang_country = get_lang_country(
+        # pylint: disable=undefined-variable
+        params, supported_languages, language_aliases
+    )
+    subdomain = 'www.' + google_domains.get(country.upper(), 'google.com')
+
+    # https://www.google.de/search?q=corona&hl=de-DE&lr=lang_de&start=0&tbs=qdr%3Ad&safe=medium
+    query_url = 'https://'+ subdomain + '/search' + "?" + urlencode({
+        'q': query,
+        'hl': lang_country,
+        'lr': "lang_" + language,
+        'ie': "utf8",
+        'oe': "utf8",
+        'start': offset,
+    })
 
 
-    return params
+    if params['time_range'] in time_range_dict:
+        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
+    if params['safesearch']:
+        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
+
+    params['url'] = query_url
+    logger.debug("query_url --> %s", query_url)
+
+    # en-US,en;q=0.8,en;q=0.5
+    params['headers']['Accept-Language'] = (
+        lang_country + ',' + language + ';q=0.8,' + language + ';q=0.5'
+        )
+    logger.debug("HTTP header Accept-Language --> %s",
+                 params['headers']['Accept-Language'])
+    params['headers']['Accept'] = (
+        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
+        )
+    #params['google_subdomain'] = subdomain
 
 
+    return params
 
 
-# get response from search-request
 def response(resp):
+    """Get response from google's search request"""
     results = []

     # detect google sorry
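
A minimal standalone sketch of the URL and Accept-Language header the new
request() builds, re-stated from the hunk above for illustration only. It uses
urllib.parse.urlencode in place of the searx.url_utils shim, and hard-codes the
subdomain where the real code resolves it via google_domains:

from urllib.parse import urlencode

def build_google_query(query, language='de', country='DE', offset=0):
    # mirrors the query_url construction in request() above
    lang_country = '%s-%s' % (language, country)
    subdomain = 'www.google.de'  # google_domains.get(country.upper(), 'google.com')
    query_url = 'https://' + subdomain + '/search?' + urlencode({
        'q': query,
        'hl': lang_country,
        'lr': 'lang_' + language,
        'ie': 'utf8',
        'oe': 'utf8',
        'start': offset,
    })
    # "en-US,en;q=0.8,en;q=0.5" style header, as built in request() above
    accept_language = '%s,%s;q=0.8,%s;q=0.5' % (lang_country, language, language)
    return query_url, accept_language

url, accept_language = build_google_query('corona')
# url --> https://www.google.de/search?q=corona&hl=de-DE&lr=lang_de&ie=utf8&oe=utf8&start=0
# accept_language --> de-DE,de;q=0.8,de;q=0.5
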
@@ -215,68 +212,53 @@ def response(resp):
     if resp_url.path.startswith('/sorry'):
         raise RuntimeWarning(gettext('CAPTCHA required'))
 
 
-    # which hostname ?
-    google_hostname = resp.search_params.get('google_hostname')
-    google_url = "https://" + google_hostname
+    # which subdomain ?
+    # subdomain = resp.search_params.get('google_subdomain')
 
 
     # convert the text to dom
     dom = html.fromstring(resp.text)
 
 
-    instant_answer = eval_xpath(dom, '//div[@id="_vBb"]//text()')
-    if instant_answer:
-        results.append({'answer': u' '.join(instant_answer)})
+    # results --> answer
+    answer = eval_xpath(dom, '//div[contains(@class, "LGOjhe")]//text()')
+    if answer:
+        results.append({'answer': ' '.join(answer)})
+    else:
+        logger.debug("did not find 'answer'")
+
+    # results --> number_of_results
     try:
-        results_num = int(eval_xpath(dom, '//div[@id="resultStats"]//text()')[0]
-                          .split()[1].replace(',', ''))
-        results.append({'number_of_results': results_num})
-    except:
-        pass
+        _txt = eval_xpath(dom, '//div[@id="result-stats"]//text()')[0]
+        _digit = ''.join([n for n in _txt if n.isdigit()])
+        number_of_results = int(_digit)
+        results.append({'number_of_results': number_of_results})
+
+    except Exception as e:  # pylint: disable=broad-except
+        logger.debug("did not find 'number_of_results'")
+        logger.error(e, exc_info=True)
 
 
     # parse results
     for result in eval_xpath(dom, results_xpath):
+
+        # google *sections*
+        if extract_text(eval_xpath(result, g_section_with_header)):
+            logger.debug("ignoring <g-section-with-header>")
+            continue
+
         try:
             title = extract_text(eval_xpath(result, title_xpath)[0])
-            url = parse_url(extract_url(eval_xpath(result, url_xpath), google_url), google_hostname)
-            parsed_url = urlparse(url, google_hostname)
-
-            # map result
-            if parsed_url.netloc == google_hostname:
-                # TODO fix inside links
-                continue
-                # if parsed_url.path.startswith(maps_path) or parsed_url.netloc.startswith(map_hostname_start):
-                #     print "yooooo"*30
-                #     x = eval_xpath(result, map_near)
-                #     if len(x) > 0:
-                #         # map : near the location
-                #         results = results + parse_map_near(parsed_url, x, google_hostname)
-                #     else:
-                #         # map : detail about a location
-                #         results = results + parse_map_detail(parsed_url, result, google_hostname)
-                # # google news
-                # elif parsed_url.path == search_path:
-                #     # skipping news results
-                #     pass
-
-                # # images result
-                # elif parsed_url.path == images_path:
-                #     # only thumbnail image provided,
-                #     # so skipping image results
-                #     # results = results + parse_images(result, google_hostname)
-                #     pass
-
-            else:
-                # normal result
-                content = extract_text_from_dom(result, content_xpath)
-                if content is None:
-                    continue
-
-                # append result
-                results.append({'url': url,
-                                'title': title,
-                                'content': content
-                                })
-        except:
-            logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
+            url = eval_xpath(result, href_xpath)[0]
+            content = extract_text_from_dom(result, content_xpath)
+            results.append({
+                'url':      url,
+                'title':    title,
+                'content':  content
+                })
+        except Exception as e:  # pylint: disable=broad-except
+            logger.error(e, exc_info=True)
+            #from lxml import etree
+            #logger.debug(etree.tostring(result, pretty_print=True))
+            #import pdb
+            #pdb.set_trace()
             continue
 
 
     # parse suggestion
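
A tiny sketch of the digit-stripping step the new number_of_results parsing
performs, assuming the first text node of div id="result-stats" looks like
Google's usual stats line (the sample string is illustrative):

_txt = 'About 1,060,000,000 results'
number_of_results = int(''.join([n for n in _txt if n.isdigit()]))
assert number_of_results == 1060000000

Filtering on isdigit() also drops the thousands separators, so no locale-aware
number parsing is needed here.
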
@@ -290,102 +272,16 @@ def response(resp):
     # return results
     return results
 
 
-
-def parse_images(result, google_hostname):
-    results = []
-    for image in eval_xpath(result, images_xpath):
-        url = parse_url(extract_text(eval_xpath(image, image_url_xpath)[0]), google_hostname)
-        img_src = extract_text(eval_xpath(image, image_img_src_xpath)[0])
-
-        # append result
-        results.append({'url': url,
-                        'title': '',
-                        'content': '',
-                        'img_src': img_src,
-                        'template': 'images.html'
-                        })
-
-    return results
-
-
-def parse_map_near(parsed_url, x, google_hostname):
-    results = []
-
-    for result in x:
-        title = extract_text_from_dom(result, map_near_title)
-        url = parse_url(extract_text_from_dom(result, map_near_url), google_hostname)
-        attributes = []
-        phone = extract_text_from_dom(result, map_near_phone)
-        add_attributes(attributes, property_phone, phone, 'tel:' + phone)
-        results.append({'title': title,
-                        'url': url,
-                        'content': attributes_to_html(attributes)
-                        })
-
-    return results
-
-
-def parse_map_detail(parsed_url, result, google_hostname):
-    results = []
-
-    # try to parse the geoloc
-    m = re.search(r'@([0-9\.]+),([0-9\.]+),([0-9]+)', parsed_url.path)
-    if m is None:
-        m = re.search(r'll\=([0-9\.]+),([0-9\.]+)\&z\=([0-9]+)', parsed_url.query)
-
-    if m is not None:
-        # geoloc found (ignored)
-        lon = float(m.group(2))  # noqa
-        lat = float(m.group(1))  # noqa
-        zoom = int(m.group(3))  # noqa
-
-        # attributes
-        attributes = []
-        address = extract_text_from_dom(result, map_address_xpath)
-        phone = extract_text_from_dom(result, map_phone_xpath)
-        add_attributes(attributes, property_address, address, 'geo:' + str(lat) + ',' + str(lon))
-        add_attributes(attributes, property_phone, phone, 'tel:' + phone)
-
-        # title / content / url
-        website_title = extract_text_from_dom(result, map_website_title_xpath)
-        content = extract_text_from_dom(result, content_xpath)
-        website_url = parse_url(extract_text_from_dom(result, map_website_url_xpath), google_hostname)
-
-        # add a result if there is a website
-        if website_url is not None:
-            results.append({'title': website_title,
-                            'content': (content + '<br />' if content is not None else '')
-                            + attributes_to_html(attributes),
-                            'url': website_url
-                            })
-
-    return results
-
-
-def add_attributes(attributes, name, value, url):
-    if value is not None and len(value) > 0:
-        attributes.append({'label': name, 'value': value, 'url': url})
-
-
-def attributes_to_html(attributes):
-    retval = '<table class="table table-striped">'
-    for a in attributes:
-        value = a.get('value')
-        if 'url' in a:
-            value = '<a href="' + a.get('url') + '">' + value + '</a>'
-        retval = retval + '<tr><th>' + a.get('label') + '</th><td>' + value + '</td></tr>'
-    retval = retval + '</table>'
-    return retval
-
-
 # get supported languages from their site
 def _fetch_supported_languages(resp):
-    supported_languages = {}
+    ret_val = {}
     dom = html.fromstring(resp.text)
-    options = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lr"]')
-    for option in options:
-        code = eval_xpath(option, './@value')[0].split('_')[-1]
-        name = eval_xpath(option, './@data-name')[0].title()
-        supported_languages[code] = {"name": name}
 
 
-    return supported_languages
+    radio_buttons = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lang"]')
+
+    for x in radio_buttons:
+        name = x.get("data-name")
+        code = x.get("value")
+        ret_val[code] = {"name": name}
+
+    return ret_val
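
For reference, the language/country resolution introduced by get_lang_country(),
as a self-contained sketch; match_language() from searx.utils is left out, so
language aliases are not resolved in this re-statement:

def lang_country_sketch(language):
    # mirrors get_lang_country() above, minus the match_language() lookup
    if language == 'all':
        language = 'en-US'
    language_array = language.split('-')
    if len(language_array) == 2:
        country = language_array[1]
    else:
        country = language_array[0].upper()
    lang = language_array[0]
    lang_country = '%s-%s' % (lang, country)
    if lang_country == 'en-EN':
        lang_country = 'en'
    return lang, country, lang_country

assert lang_country_sketch('all') == ('en', 'US', 'en-US')
assert lang_country_sketch('de-AT') == ('de', 'AT', 'de-AT')
assert lang_country_sketch('fi') == ('fi', 'FI', 'fi-FI')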

+ 177 - 69
searx/engines/google_images.py

@@ -1,97 +1,205 @@
-"""
- Google (Images)
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""Google (Images)
+
+:website:     https://images.google.com (redirected to subdomain www.)
+:provide-api: yes (https://developers.google.com/custom-search/)
+:using-api:   not the official one, since it requires registration with another service
+:results:     HTML
+:stable:      no
+:template:    images.html
+:parse:       url, title, content, source, thumbnail_src, img_src
+
+For a detailed description of the *REST-full* API see: `Query Parameter
+Definitions`_.
+
+.. admonition:: Content-Security-Policy (CSP)
 
 
- @website     https://www.google.com
- @provide-api yes (https://developers.google.com/custom-search/)
+   This engine needs to allow images from the `data URLs`_ (prefixed with the
+   ``data:`` scheme)::
+
+     Header set Content-Security-Policy "img-src 'self' data: ;"
+
+.. _Query Parameter Definitions:
+   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
 
 
- @using-api   no
- @results     HTML chunks with JSON inside
- @stable      no
- @parse       url, title, img_src
 """
 """
 
 
-from datetime import date, timedelta
-from json import loads
 from lxml import html
-from searx.url_utils import urlencode
+from flask_babel import gettext
+from searx import logger
+from searx.url_utils import urlencode, urlparse
+from searx.utils import eval_xpath
+from searx.engines.xpath import extract_text
+
+# pylint: disable=unused-import
+from searx.engines.google import (
+    supported_languages_url,
+    _fetch_supported_languages
+)
+# pylint: enable=unused-import
+
+from searx.engines.google import (
+    get_lang_country,
+    google_domains,
+    time_range_dict
+)
+
+logger = logger.getChild('google images')
 
 
 # engine dependent config
+
 categories = ['images']
-paging = True
-safesearch = True
+paging = False
+language_support = True
+use_locale_domain = True
 time_range_support = True
-number_of_results = 100
-
-search_url = 'https://www.google.com/search'\
-    '?{query}'\
-    '&tbm=isch'\
-    '&yv=2'\
-    '&{search_options}'
-time_range_attr = "qdr:{range}"
-time_range_custom_attr = "cdr:1,cd_min:{start},cd_max{end}"
-time_range_dict = {'day': 'd',
-                   'week': 'w',
-                   'month': 'm'}
+safesearch = True
 
 
+filter_mapping = {
+    0 : 'images',
+    1 : 'active',
+    2 : 'active'
+}
+
+def scrap_out_thumbs(dom):
+    """Scrap out thumbnail data from <script> tags.
+    """
+    ret_val = dict()
+    for script in eval_xpath(dom, '//script[contains(., "_setImgSrc(")]'):
+        _script = script.text
+        # _setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQSkZJR ....');
+        _thumb_no, _img_data = _script[len("_setImgSrc("):-2].split(",",1)
+        _thumb_no = _thumb_no.replace("'","")
+        _img_data = _img_data.replace("'","")
+        _img_data = _img_data.replace(r"\/", r"/")
+        ret_val[_thumb_no] = _img_data.replace(r"\x3d", "=")
+    return ret_val
 
 
-# do search-request
 def request(query, params):
-    search_options = {
-        'ijn': params['pageno'] - 1,
-        'start': (params['pageno'] - 1) * number_of_results
-    }
+    """Google-Video search request"""
+
+    language, country, lang_country = get_lang_country(
+        # pylint: disable=undefined-variable
+        params, supported_languages, language_aliases
+    )
+    subdomain = 'www.' + google_domains.get(country.upper(), 'google.com')
+
+    query_url = 'https://'+ subdomain + '/search' + "?" + urlencode({
+        'q':   query,
+        'tbm': "isch",
+        'hl':  lang_country,
+        'lr': "lang_" + language,
+        'ie': "utf8",
+        'oe': "utf8",
+        'num': 30,
+    })
 
 
     if params['time_range'] in time_range_dict:
-        search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])
-    elif params['time_range'] == 'year':
-        now = date.today()
-        then = now - timedelta(days=365)
-        start = then.strftime('%m/%d/%Y')
-        end = now.strftime('%m/%d/%Y')
-        search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)
-
-    if safesearch and params['safesearch']:
-        search_options['safe'] = 'on'
-
-    params['url'] = search_url.format(query=urlencode({'q': query}),
-                                      search_options=urlencode(search_options))
-
+        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
+    if params['safesearch']:
+        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
+
+    params['url'] = query_url
+    logger.debug("query_url --> %s", query_url)
+
+    params['headers']['Accept-Language'] = (
+        "%s,%s;q=0.8,%s;q=0.5" % (lang_country, language, language))
+    logger.debug(
+        "HTTP Accept-Language --> %s", params['headers']['Accept-Language'])
+    params['headers']['Accept'] = (
+        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
+        )
+    #params['google_subdomain'] = subdomain
     return params
 
 
 
 
-# get response from search-request
 def response(resp):
+    """Get response from google's search request"""
     results = []
 
 
+    # detect google sorry
+    resp_url = urlparse(resp.url)
+    if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
+        raise RuntimeWarning('sorry.google.com')
+
+    if resp_url.path.startswith('/sorry'):
+        raise RuntimeWarning(gettext('CAPTCHA required'))
+
+    # which subdomain ?
+    # subdomain = resp.search_params.get('google_subdomain')
+
+    # convert the text to dom
     dom = html.fromstring(resp.text)
+    img_bas64_map = scrap_out_thumbs(dom)
 
 
     # parse results
-    for result in dom.xpath('//div[contains(@class, "rg_meta")]/text()'):
+    #
+    # root element::
+    #     <div id="islmp" ..>
+    # result div per image::
+    #     <div jsmodel="tTXmib"> / <div jsaction="..." data-id="..."
+    #         The data-id matches an item in a json-data structure in::
+    #         <script nonce="I+vqelcy/01CKiBJi5Z1Ow">AF_initDataCallback({key: 'ds:1', ... data:function(){return [ ...
+    #     In this structure the link to the original PNG, JPG or whatever is given
+    #     (we do not extract that link here; you could still implement that)
+    # first link per image-div contains an <img> with the data-iid for base64 encoded image data::
+    #      <img class="rg_i Q4LuWd" data-iid="0"
+    # second link per image-div is the target link::
+    #      <a class="VFACy kGQAp" href="https://en.wikipedia.org/wiki/The_Sacrament_of_the_Last_Supper">
+    # the second link also contains two div tags with the *description* and *publisher*::
+    #      <div class="WGvvNb">The Sacrament of the Last Supper ...</div>
+    #      <div class="fxgdke">en.wikipedia.org</div>
+
+    root = eval_xpath(dom, '//div[@id="islmp"]')
+    if not root:
+        logger.error("did not find root element id='islmp'")
+        return results
+
+    root = root[0]
+    for img_node in eval_xpath(root, './/img[contains(@class, "rg_i")]'):
 
 
         try:
-            metadata = loads(result)
-
-            img_format = metadata.get('ity', '')
-            img_width = metadata.get('ow', '')
-            img_height = metadata.get('oh', '')
-            if img_width and img_height:
-                img_format += " {0}x{1}".format(img_width, img_height)
-
-            source = metadata.get('st', '')
-            source_url = metadata.get('isu', '')
-            if source_url:
-                source += " ({0})".format(source_url)
-
-            results.append({'url': metadata['ru'],
-                            'title': metadata['pt'],
-                            'content': metadata.get('s', ''),
-                            'source': source,
-                            'img_format': img_format,
-                            'thumbnail_src': metadata['tu'],
-                            'img_src': metadata['ou'],
-                            'template': 'images.html'})
-
-        except:
+            img_alt = eval_xpath(img_node, '@alt')[0]
+
+            img_base64_id = eval_xpath(img_node, '@data-iid')
+            if img_base64_id:
+                img_base64_id = img_base64_id[0]
+                thumbnail_src = img_bas64_map[img_base64_id]
+            else:
+                thumbnail_src = eval_xpath(img_node, '@src')
+                if not thumbnail_src:
+                    thumbnail_src = eval_xpath(img_node, '@data-src')
+                if thumbnail_src:
+                    thumbnail_src = thumbnail_src[0]
+                else:
+                    thumbnail_src = ''
+
+            link_node = eval_xpath(img_node, '../../../a[2]')[0]
+            url = eval_xpath(link_node, '@href')[0]
+
+            pub_nodes = eval_xpath(link_node, './div/div')
+            pub_descr = img_alt
+            pub_source = ''
+            if pub_nodes:
+                pub_descr = extract_text(pub_nodes[0])
+                pub_source = extract_text(pub_nodes[1])
+
+            results.append({
+                'url': url,
+                'title': img_alt,
+                'content': pub_descr,
+                'source': pub_source,
+                'img_src': url,
+                # 'img_format': img_format,
+                'thumbnail_src': thumbnail_src,
+                'template': 'images.html'
+            })
+        except Exception as e:  # pylint: disable=broad-except
+            logger.error(e, exc_info=True)
+            #from lxml import etree
+            #logger.debug(etree.tostring(img_node, pretty_print=True))
+            #import pdb
+            #pdb.set_trace()
             continue
 
 
     return results
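
A standalone sketch of the _setImgSrc() parsing done by scrap_out_thumbs(),
applied to one sample script body (the sample data is illustrative; real pages
carry one such call per thumbnail):

def parse_set_img_src(script_text):
    # mirrors the per-<script> body of scrap_out_thumbs() above
    _thumb_no, _img_data = script_text[len("_setImgSrc("):-2].split(",", 1)
    _thumb_no = _thumb_no.replace("'", "")
    _img_data = _img_data.replace("'", "")
    _img_data = _img_data.replace(r"\/", r"/")
    return _thumb_no, _img_data.replace(r"\x3d", "=")

thumb_no, img_data = parse_set_img_src(
    r"_setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQ\x3d\x3d');")
assert thumb_no == '0'
assert img_data == 'data:image/jpeg;base64,/9j/4AAQ=='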