# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Google (Scholar)

For a detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.

.. _Query Parameter Definitions:
   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
"""

# pylint: disable=invalid-name

from urllib.parse import urlencode
from datetime import datetime
from typing import Optional

from lxml import html

from searx.utils import (
    eval_xpath,
    eval_xpath_getindex,
    eval_xpath_list,
    extract_text,
)

from searx.engines.google import (
    get_lang_info,
    time_range_dict,
    detect_google_sorry,
)

# pylint: disable=unused-import
from searx.engines.google import (
    fetch_traits,
    supported_languages_url,
    _fetch_supported_languages,
)

# pylint: enable=unused-import

# about
about = {
    "website": 'https://scholar.google.com',
    "wikidata_id": 'Q494817',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['science', 'scientific publications']
paging = True
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = False
send_accept_language_header = True


def time_range_url(params):
    """Returns a URL query component for a google-Scholar time range based on
    ``params['time_range']``.  Google-Scholar only supports ranges in years.
    To have any effect, all the Searx ranges (*day*, *week*, *month*, *year*)
    are mapped to *year*.  If no range is set, only the leading ``&`` is
    returned.  Example::

        &as_ylo=2019

    """
    # as_ylo=2016&as_yhi=2019
    ret_val = ''
    if params['time_range'] in time_range_dict:
        ret_val = urlencode({'as_ylo': datetime.now().year - 1})
    return '&' + ret_val
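
# Illustrative sketch (not part of the engine code above): assuming the current
# year is 2023, any of the Searx time ranges maps to last year, while no range
# yields just the separator.
#
#   time_range_url({'time_range': 'month'})  # -> '&as_ylo=2022'
#   time_range_url({'time_range': None})     # -> '&'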


def request(query, params):
    """Google-Scholar search request"""

    offset = (params['pageno'] - 1) * 10
    lang_info = get_lang_info(params, supported_languages, language_aliases, False)

    # subdomain is: scholar.google.xy
    lang_info['subdomain'] = lang_info['subdomain'].replace("www.", "scholar.")

    query_url = (
        'https://'
        + lang_info['subdomain']
        + '/scholar'
        + "?"
        + urlencode({'q': query, **lang_info['params'], 'ie': "utf8", 'oe': "utf8", 'start': offset})
    )

    query_url += time_range_url(params)
    params['url'] = query_url

    params['cookies']['CONSENT'] = "YES+"
    params['headers'].update(lang_info['headers'])
    params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'

    # params['google_subdomain'] = subdomain
    return params
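
# Illustrative sketch (not part of the engine code above): for the query
# "machine learning" on the first page with an English UI and a time range
# selected, the built URL looks roughly like the line below.  The exact
# language parameters (e.g. hl / lr) depend on what get_lang_info() returns.
#
#   https://scholar.google.com/scholar?q=machine+learning&hl=en&lr=lang_en&ie=utf8&oe=utf8&start=0&as_ylo=2022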


def parse_gs_a(text: Optional[str]):
    """Parse the text written in green (the ``gs_a`` line below each title).

    Possible formats:
    * "{authors} - {journal}, {year} - {publisher}"
    * "{authors} - {year} - {publisher}"
    * "{authors} - {publisher}"
    """
    if text is None or text == "":
        return None, None, None, None

    s_text = text.split(' - ')
    authors = s_text[0].split(', ')
    publisher = s_text[-1]
    if len(s_text) != 3:
        return authors, None, publisher, None

    # the format is "{authors} - {journal}, {year} - {publisher}" or "{authors} - {year} - {publisher}"
    # get journal and year
    journal_year = s_text[1].split(', ')
    # the journal name is optional and may contain commas
    if len(journal_year) > 1:
        journal = ', '.join(journal_year[0:-1])
        if journal == '…':
            journal = None
    else:
        journal = None
    # year
    year = journal_year[-1]
    try:
        publishedDate = datetime.strptime(year.strip(), '%Y')
    except ValueError:
        publishedDate = None
    return authors, journal, publisher, publishedDate
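
# Illustrative sketch (not part of the engine code above): a typical green line
# such as
#   "J Doe, A Smith - Journal of Examples, 2019 - example.org"
# is parsed into
#   (['J Doe', 'A Smith'], 'Journal of Examples', 'example.org', datetime(2019, 1, 1))
# while the short form "J Doe - example.org" yields
#   (['J Doe'], None, 'example.org', None)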


def response(resp):  # pylint: disable=too-many-locals
    """Get response from google's search request"""
    results = []

    detect_google_sorry(resp)

    # which subdomain ?
    # subdomain = resp.search_params.get('google_subdomain')

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath_list(dom, '//div[@data-cid]'):

        title = extract_text(eval_xpath(result, './/h3[1]//a'))

        if not title:
            # this is a [ZITATION] block
            continue

        pub_type = extract_text(eval_xpath(result, './/span[@class="gs_ct1"]'))
        if pub_type:
            pub_type = pub_type[1:-1].lower()

        url = eval_xpath_getindex(result, './/h3[1]//a/@href', 0)
        content = extract_text(eval_xpath(result, './/div[@class="gs_rs"]'))
        authors, journal, publisher, publishedDate = parse_gs_a(
            extract_text(eval_xpath(result, './/div[@class="gs_a"]'))
        )
        if publisher and publisher in url:
            publisher = None

        # cited by
        comments = extract_text(eval_xpath(result, './/div[@class="gs_fl"]/a[starts-with(@href,"/scholar?cites=")]'))

        # link to the html or pdf document
        html_url = None
        pdf_url = None
        doc_url = eval_xpath_getindex(result, './/div[@class="gs_or_ggsm"]/a/@href', 0, default=None)
        doc_type = extract_text(eval_xpath(result, './/span[@class="gs_ctg2"]'))
        if doc_type == "[PDF]":
            pdf_url = doc_url
        else:
            html_url = doc_url

        results.append(
            {
                'template': 'paper.html',
                'type': pub_type,
                'url': url,
                'title': title,
                'authors': authors,
                'publisher': publisher,
                'journal': journal,
                'publishedDate': publishedDate,
                'content': content,
                'comments': comments,
                'html_url': html_url,
                'pdf_url': pdf_url,
            }
        )

    # parse suggestion
    for suggestion in eval_xpath(dom, '//div[contains(@class, "gs_qsuggest_wrap")]//li//a'):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    for correction in eval_xpath(dom, '//div[@class="gs_r gs_pda"]/a'):
        results.append({'correction': extract_text(correction)})

    return results