google_images.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Google (Images)

For a detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.

.. admonition:: Content-Security-Policy (CSP)

   This engine needs to allow images from the `data URLs`_ (prefixed with the
   ``data:`` scheme)::

       Header set Content-Security-Policy "img-src 'self' data: ;"

.. _Query Parameter Definitions:
   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
.. _data URLs:
   https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs
"""

from urllib.parse import urlencode, unquote

from lxml import html

from searx import logger
from searx.utils import (
    eval_xpath,
    eval_xpath_list,
    eval_xpath_getindex,
    extract_text,
)
from searx.engines.google import (
    get_lang_info,
    time_range_dict,
    detect_google_sorry,
)

# pylint: disable=unused-import
from searx.engines.google import (
    supported_languages_url,
    _fetch_supported_languages,
)
# pylint: enable=unused-import

logger = logger.getChild('google images')

# about
about = {
    "website": 'https://images.google.com',
    "wikidata_id": 'Q521550',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['images']
paging = False
use_locale_domain = True
time_range_support = True
safesearch = True

filter_mapping = {
    0: 'images',
    1: 'active',
    2: 'active',
}
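# searx passes params['safesearch'] as 0 (off), 1 (moderate) or 2 (strict).
# request() below only appends Google's ``safe`` URL parameter for truthy
# levels, so the entry for 0 is effectively never used.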


def scrap_out_thumbs(dom):
    """Scrape out thumbnail data from <script> tags."""
    ret_val = {}
    for script in eval_xpath(dom, '//script[contains(., "_setImgSrc(")]'):
        _script = script.text
        # _setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQSkZJR ....');
        _thumb_no, _img_data = _script[len("_setImgSrc("):-2].split(",", 1)
        _thumb_no = _thumb_no.replace("'", "")
        _img_data = _img_data.replace("'", "")
        _img_data = _img_data.replace(r"\/", r"/")
        ret_val[_thumb_no] = _img_data.replace(r"\x3d", "=")
    return ret_val
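
# For illustration (hypothetical input, not copied from a live response): a
# script containing
#     _setImgSrc('42','data:image\/jpeg;base64,\/9j\/4AAQ\x3d\x3d');
# would be scraped into ``{'42': 'data:image/jpeg;base64,/9j/4AAQ=='}``.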


def scrap_img_by_id(script, data_id):
    """Get full image URL by data-id in parent element."""
    img_url = ''
    _script = script.split('\n')
    for i, line in enumerate(_script):
        if 'gstatic.com/images' in line and data_id in line:
            url_line = _script[i + 1]
            img_url = url_line.split('"')[1]
            img_url = unquote(img_url.replace(r'\u00', r'%'))
    return img_url
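
# The script passed in here is the AF_initDataCallback payload (see the
# comments in response() below): the line following a line that contains
# both the data-id and a gstatic thumbnail URL carries the full-size image
# URL as its first quoted string, with bytes escaped as ``\u00XX``; hence
# the ``\u00`` to ``%`` rewrite before unquote().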


def request(query, params):
    """Google-Image search request"""

    lang_info = get_lang_info(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases, False
    )
    logger.debug(
        "HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])

    query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
        'q': query,
        'tbm': "isch",
        **lang_info['params'],
        'ie': "utf8",
        'oe': "utf8",
        'num': 30,
    })

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
    params['url'] = query_url

    params['headers'].update(lang_info['headers'])
    params['headers']['Accept'] = (
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    )
    return params
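
# For example, a query for "tux" with an English locale yields a URL of
# roughly this shape (subdomain and language parameters come from
# get_lang_info() and vary with the configured locale):
#
#     https://www.google.com/search?q=tux&tbm=isch&hl=en&ie=utf8&oe=utf8&num=30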


def response(resp):
    """Get response from google's search request"""
    results = []

    detect_google_sorry(resp)

    # convert the text to dom
    dom = html.fromstring(resp.text)
    img_base64_map = scrap_out_thumbs(dom)
    img_src_script = eval_xpath_getindex(
        dom, '//script[contains(., "AF_initDataCallback({key: ")]', 1).text

    # parse results
    #
    # root element::
    #     <div id="islmp" ..>
    # result div per image::
    #     <div jsmodel="tTXmib"> / <div jsaction="..." data-id="..."
    # The data-id matches an item in a JSON data structure embedded in::
    #     <script nonce="I+vqelcy/01CKiBJi5Z1Ow">AF_initDataCallback({key: 'ds:1', ... data:function(){return [ ...
    # In this structure the link to the original PNG, JPG (or whatever) is given.
    # The first link per image div contains an <img> with a data-iid that refers
    # to base64 encoded image data::
    #     <img class="rg_i Q4LuWd" data-iid="0"
    # The second link per image div is the target link::
    #     <a class="VFACy kGQAp" href="https://en.wikipedia.org/wiki/The_Sacrament_of_the_Last_Supper">
    # That second link also contains two div tags with the *description* and *publisher*::
    #     <div class="WGvvNb">The Sacrament of the Last Supper ...</div>
    #     <div class="fxgdke">en.wikipedia.org</div>

    root = eval_xpath(dom, '//div[@id="islmp"]')
    if not root:
        logger.error("did not find root element id='islmp'")
        return results

    root = root[0]
    for img_node in eval_xpath_list(root, './/img[contains(@class, "rg_i")]'):

        img_alt = eval_xpath_getindex(img_node, '@alt', 0)

        img_base64_id = eval_xpath(img_node, '@data-iid')
        if img_base64_id:
            img_base64_id = img_base64_id[0]
            thumbnail_src = img_base64_map[img_base64_id]
        else:
            thumbnail_src = eval_xpath(img_node, '@src')
            if not thumbnail_src:
                thumbnail_src = eval_xpath(img_node, '@data-src')
            if thumbnail_src:
                thumbnail_src = thumbnail_src[0]
            else:
                thumbnail_src = ''

        link_node = eval_xpath_getindex(img_node, '../../../a[2]', 0)
        url = eval_xpath_getindex(link_node, '@href', 0)

        pub_nodes = eval_xpath(link_node, './div/div')
        pub_descr = img_alt
        pub_source = ''
        if pub_nodes:
            pub_descr = extract_text(pub_nodes[0])
            pub_source = extract_text(pub_nodes[1])

        img_src_id = eval_xpath_getindex(img_node, '../../../@data-id', 0)
        src_url = scrap_img_by_id(img_src_script, img_src_id)
        if not src_url:
            src_url = thumbnail_src

        results.append({
            'url': url,
            'title': img_alt,
            'content': pub_descr,
            'source': pub_source,
            'img_src': src_url,
            # 'img_format': img_format,
            'thumbnail_src': thumbnail_src,
            'template': 'images.html',
        })

    return results
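
# Minimal usage sketch (comments only; inside searx the search framework
# drives the engine and fills ``params`` before performing the HTTP request).
# Standalone, and assuming a client whose response object exposes ``.text``
# and ``.url`` (which detect_google_sorry() inspects), the flow is roughly:
#
#     params = {'headers': {}, 'time_range': '', 'safesearch': 0}
#     params = request('tux', params)
#     resp = http_client.get(params['url'], headers=params['headers'])  # hypothetical client
#     for result in response(resp):
#         print(result['url'], result['img_src'])
#
# Note that request() relies on ``supported_languages`` and
# ``language_aliases`` being set on this module by searx at runtime (hence
# the ``pylint: disable=undefined-variable`` hint above).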