google_videos.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""This is the implementation of the google videos engine.

.. admonition:: Content-Security-Policy (CSP)

   This engine needs to allow images from the `data URLs`_ (prefixed with the
   ``data:`` scheme)::

     Header set Content-Security-Policy "img-src 'self' data: ;"

.. _data URLs:
   https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs
"""

# pylint: disable=invalid-name

import re
from urllib.parse import urlencode

from lxml import html

from searx.utils import (
    eval_xpath,
    eval_xpath_list,
    eval_xpath_getindex,
    extract_text,
)

from searx.engines.google import (
    get_lang_info,
    time_range_dict,
    filter_mapping,
    g_section_with_header,
    title_xpath,
    suggestion_xpath,
    detect_google_sorry,
)

# pylint: disable=unused-import
from searx.engines.google import supported_languages_url, _fetch_supported_languages

# pylint: enable=unused-import
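
# Note: ``logger``, ``supported_languages`` and ``language_aliases`` used
# below are not imported here; presumably they are injected into this module
# at load time by searx's engine framework.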

# about
about = {
    "website": 'https://www.google.com',
    "wikidata_id": 'Q219885',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['videos', 'web']
paging = False
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = True

RE_CACHE = {}


def _re(regexpr):
    """returns compiled regular expression"""
    RE_CACHE[regexpr] = RE_CACHE.get(regexpr, re.compile(regexpr))
    return RE_CACHE[regexpr]
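
# Usage sketch (hypothetical pattern): the first call compiles the
# expression; repeated calls return the cached object, i.e.
#
#   assert _re(r"dimg_\d+") is _re(r"dimg_\d+")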


def scrap_out_thumbs_src(dom):
    """Scrap out thumbnail URLs (the video's preview image) from the
    ``google.ldi`` object in the <script> tags."""
    ret_val = {}
    thumb_name = 'dimg_'
    for script in eval_xpath_list(dom, '//script[contains(., "google.ldi={")]'):
        _script = script.text
        # "dimg_35":"https://i.ytimg.c....",
        for k, v in _re('(' + thumb_name + '[0-9]*)":"(http[^"]*)').findall(_script):
            # unescape the unicode escapes in the URL
            v = v.replace(r'\u003d', '=')
            v = v.replace(r'\u0026', '&')
            ret_val[k] = v
    logger.debug("found %s imgdata for: %s", thumb_name, ret_val.keys())
    return ret_val
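
# Sketch of the payload scrap_out_thumbs_src() matches (assumed shape, for
# illustration only):
#
#   google.ldi={"dimg_35":"https://i.ytimg.com/vi/...\u003ds168"};
#
# which would yield {'dimg_35': 'https://i.ytimg.com/vi/...=s168'}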


def scrap_out_thumbs(dom):
    """Scrap out thumbnail data from <script> tags."""
    ret_val = {}
    thumb_name = 'dimg_'
    for script in eval_xpath_list(dom, '//script[contains(., "_setImagesSrc")]'):
        _script = script.text
        # var s='data:image/jpeg;base64, ...'
        _imgdata = _re("s='([^']*)").findall(_script)
        if not _imgdata:
            continue
        # var ii=['dimg_17']
        for _vidthumb in _re(r"(%s\d+)" % thumb_name).findall(_script):
            # At least the equal sign in the URL needs to be decoded
            ret_val[_vidthumb] = _imgdata[0].replace(r"\x3d", "=")
    logger.debug("found %s imgdata for: %s", thumb_name, ret_val.keys())
    return ret_val
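
# Minimal sketch of the <script> content scrap_out_thumbs() expects (assumed
# shape, for illustration only):
#
#   function _setImagesSrc(ii, s) { ... }
#   var s='data:image/jpeg;base64,/9j/4AAQ\x3d\x3d';
#   var ii=['dimg_17'];
#
# which would yield {'dimg_17': 'data:image/jpeg;base64,/9j/4AAQ=='}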


def request(query, params):
    """Google-Video search request"""

    lang_info = get_lang_info(params, supported_languages, language_aliases, False)
    logger.debug("HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])

    query_url = (
        'https://'
        + lang_info['subdomain']
        + '/search'
        + "?"
        + urlencode({'q': query, 'tbm': "vid", **lang_info['params'], 'ie': "utf8", 'oe': "utf8", 'ucbcb': 1})
    )

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
    params['url'] = query_url

    params['headers'].update(lang_info['headers'])
    params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'

    return params
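
# Illustrative URL built by request() (the locale parameters come from
# get_lang_info(); the exact values shown here are an assumption):
#
#   https://www.google.com/search?q=<query>&tbm=vid&hl=en&ie=utf8&oe=utf8&ucbcb=1&safe=medium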


def response(resp):
    """Get response from google's search request"""
    results = []

    detect_google_sorry(resp)

    # convert the text to dom
    dom = html.fromstring(resp.text)
    vidthumb_imgdata = scrap_out_thumbs(dom)
    thumbs_src = scrap_out_thumbs_src(dom)
    logger.debug(str(thumbs_src))

    # parse results
    for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):

        # ignore google *sections*
        if extract_text(eval_xpath(result, g_section_with_header)):
            logger.debug("ignoring <g-section-with-header>")
            continue

        # ignore articles without an image id (e.g. news articles)
        img_id = eval_xpath_getindex(result, './/g-img/img/@id', 0, default=None)
        if img_id is None:
            logger.error("no img_id found in item %s (news article?)", len(results) + 1)
            continue
        img_src = vidthumb_imgdata.get(img_id, None)
        if not img_src:
            img_src = thumbs_src.get(img_id, "")

        title = extract_text(eval_xpath_getindex(result, title_xpath, 0))
        url = eval_xpath_getindex(result, './/div[@class="dXiKIc"]//a/@href', 0)
        length = extract_text(eval_xpath(result, './/div[contains(@class, "P7xzyf")]/span/span'))
        c_node = eval_xpath_getindex(result, './/div[@class="Uroaid"]', 0)
        content = extract_text(c_node)
        pub_info = extract_text(eval_xpath(result, './/div[@class="Zg1NU"]'))

        results.append(
            {
                'url': url,
                'title': title,
                'content': content,
                'length': length,
                'author': pub_info,
                'thumbnail': img_src,
                'template': 'videos.html',
            }
        )

    # parse suggestion
    for suggestion in eval_xpath_list(dom, suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    return results