# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Google (Scholar)

For a detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.

.. _Query Parameter Definitions:
   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
"""
# pylint: disable=invalid-name, missing-function-docstring

from urllib.parse import urlencode
from datetime import datetime
from lxml import html

from searx import logger
from searx.utils import (
    eval_xpath,
    eval_xpath_list,
    extract_text,
)
from searx.engines.google import (
    get_lang_info,
    time_range_dict,
    detect_google_sorry,
)

# pylint: disable=unused-import
from searx.engines.google import (
    supported_languages_url,
    _fetch_supported_languages,
)
# pylint: enable=unused-import
# about
about = {
    "website": 'https://scholar.google.com',
    "wikidata_id": 'Q494817',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['science']
paging = True
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = False

logger = logger.getChild('google scholar')


def time_range_url(params):
    """Returns a URL query component for a Google Scholar time range based on
    ``params['time_range']``.  Google Scholar only supports ranges in years, so
    to have any effect, all the searx ranges (*day*, *week*, *month*, *year*)
    are mapped to *year*.  If no range is set, an empty string is returned.
    Example::

        &as_ylo=2019
    """
    # as_ylo=2016&as_yhi=2019
    ret_val = ''
    if params['time_range'] in time_range_dict:
        ret_val = '&' + urlencode({'as_ylo': datetime.now().year - 1})
    return ret_val
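
# A minimal usage sketch (assumes 'year' is a key in time_range_dict and a
# current year of 2021; both values are illustrative):
#
#   >>> time_range_url({'time_range': 'year'})
#   '&as_ylo=2020'
#   >>> time_range_url({'time_range': None})
#   ''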


def request(query, params):
    """Google-Scholar search request"""

    offset = (params['pageno'] - 1) * 10
    lang_info = get_lang_info(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases, False
    )

    # subdomain is: scholar.google.xy
    lang_info['subdomain'] = lang_info['subdomain'].replace("www.", "scholar.")

    query_url = 'https://' + lang_info['subdomain'] + '/scholar' + "?" + urlencode({
        'q': query,
        **lang_info['params'],
        'ie': "utf8",
        'oe': "utf8",
        'start': offset,
    })
    query_url += time_range_url(params)

    logger.debug("query_url --> %s", query_url)
    params['url'] = query_url

    logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
    params['headers'].update(lang_info['headers'])
    params['headers']['Accept'] = (
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    )

    # params['google_subdomain'] = subdomain
    return params
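
# Illustrative example of a URL built by request() for page 2 of a query with
# a time range set (the parameter values, in particular the language related
# ones coming from lang_info, are assumptions):
#
#   https://scholar.google.com/scholar?q=vector+space&hl=en&ie=utf8&oe=utf8&start=10&as_ylo=2020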


def response(resp):
    """Get response from google's search request"""
    results = []

    detect_google_sorry(resp)

    # which subdomain ?
    # subdomain = resp.search_params.get('google_subdomain')

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath_list(dom, '//div[@class="gs_ri"]'):

        title = extract_text(eval_xpath(result, './h3[1]//a'))
        if not title:
            # this is a [ZITATION] block (citation-only result without a link)
            continue

        url = eval_xpath(result, './h3[1]//a/@href')[0]
        content = extract_text(eval_xpath(result, './div[@class="gs_rs"]')) or ''

        pub_info = extract_text(eval_xpath(result, './div[@class="gs_a"]'))
        if pub_info:
            content += "[%s]" % pub_info

        pub_type = extract_text(eval_xpath(result, './/span[@class="gs_ct1"]'))
        if pub_type:
            title = title + " " + pub_type

        results.append({
            'url': url,
            'title': title,
            'content': content,
        })

    # parse suggestions
    for suggestion in eval_xpath(dom, '//div[contains(@class, "gs_qsuggest_wrap")]//li//a'):
        results.append({'suggestion': extract_text(suggestion)})

    # parse spell checking corrections
    for correction in eval_xpath(dom, '//div[@class="gs_r gs_pda"]/a'):
        results.append({'correction': extract_text(correction)})

    return results
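
# Sketch of the dicts response() appends to ``results`` (all field values
# below are illustrative assumptions, not real data):
#
#   {'url': 'https://example.org/paper',
#    'title': 'Some paper title [PDF]',
#    'content': 'Snippet text …[A Author - Journal of Examples, 2019]'}
#   {'suggestion': 'some related query'}
#   {'correction': 'some corrected query'}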