semantic_scholar.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""Semantic Scholar (Science)"""

from json import dumps
from datetime import datetime

from lxml import html
from flask_babel import gettext

from searx.network import get
from searx.utils import eval_xpath_getindex, gen_useragent, html_to_text

about = {
    "website": 'https://www.semanticscholar.org/',
    "wikidata_id": 'Q22908627',
    "official_api_documentation": 'https://api.semanticscholar.org/',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

categories = ['science', 'scientific publications']
paging = True

search_url = 'https://www.semanticscholar.org/api/1/search'
base_url = 'https://www.semanticscholar.org'


def _get_ui_version():
    """Return the build version of the Semantic Scholar web UI, read from the
    ``s2-ui-version`` ``<meta>`` tag on the homepage; the search endpoint
    expects it in the ``X-S2-UI-Version`` request header."""
    resp = get(base_url)
    if not resp.ok:
        raise RuntimeError("Can't determine Semantic Scholar UI version")
    doc = html.fromstring(resp.text)
    ui_version = eval_xpath_getindex(doc, "//meta[@name='s2-ui-version']/@content", 0)
    if not ui_version:
        raise RuntimeError("Can't determine Semantic Scholar UI version")
    return ui_version
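

# _get_ui_version() costs a network round-trip to the homepage on every call.
# A minimal memoization sketch (hypothetical, not part of the upstream engine):
# cache the value for the lifetime of the process, since the UI version only
# changes when Semantic Scholar redeploys its frontend.
_ui_version_cache = None


def _get_ui_version_cached():
    global _ui_version_cache  # assumes a single process; no locking
    if _ui_version_cache is None:
        _ui_version_cache = _get_ui_version()
    return _ui_version_cache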


def request(query, params):
    params['url'] = search_url
    params['method'] = 'POST'
    params['headers'] = {
        'Content-Type': 'application/json',
        'X-S2-UI-Version': _get_ui_version(),
        'X-S2-Client': "webapp-browser",
        'User-Agent': gen_useragent(),
    }
    params['data'] = dumps(
        {
            "queryString": query,
            "page": params['pageno'],
            "pageSize": 10,
            "sort": "relevance",
            "getQuerySuggestions": False,
            "authors": [],
            "coAuthors": [],
            "venues": [],
            "performTitleMatch": True,
        }
    )
    return params
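

# For illustration: request('deep learning', {'pageno': 2}) produces a POST to
# search_url with a JSON body like the following (values taken from the dict
# above):
#
#   {"queryString": "deep learning", "page": 2, "pageSize": 10,
#    "sort": "relevance", "getQuerySuggestions": false, "authors": [],
#    "coAuthors": [], "venues": [], "performTitleMatch": true}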


def response(resp):
    res = resp.json()
    results = []

    for result in res['results']:
        url = result.get('primaryPaperLink', {}).get('url')
        if not url and result.get('links'):
            url = result.get('links')[0]
        if not url:
            alternatePaperLinks = result.get('alternatePaperLinks')
            if alternatePaperLinks:
                url = alternatePaperLinks[0].get('url')
        if not url:
            # fall back to the paper's detail page on semanticscholar.org
            url = base_url + '/paper/%s' % result['id']

        # publishedDate
        if 'pubDate' in result:
            publishedDate = datetime.strptime(result['pubDate'], "%Y-%m-%d")
        else:
            publishedDate = None

        # authors: each entry is a list whose first element carries the name
        authors = [author[0]['name'] for author in result.get('authors', [])]

        # pick the first alternate link, skipping crawler and DOI links
        pdf_url = None
        for doc in result.get('alternatePaperLinks', []):
            if doc['linkType'] not in ('crawler', 'doi'):
                pdf_url = doc['url']
                break

        # comments
        comments = None
        if 'citationStats' in result:
            comments = gettext(
                '{numCitations} citations from the year {firstCitationVelocityYear} to {lastCitationVelocityYear}'
            ).format(
                numCitations=result['citationStats']['numCitations'],
                firstCitationVelocityYear=result['citationStats']['firstCitationVelocityYear'],
                lastCitationVelocityYear=result['citationStats']['lastCitationVelocityYear'],
            )

        results.append(
            {
                'template': 'paper.html',
                'url': url,
                'title': result['title']['text'],
                'content': html_to_text(result['paperAbstract']['text']),
                'journal': result.get('venue', {}).get('text') or result.get('journal', {}).get('name'),
                'doi': result.get('doiInfo', {}).get('doi'),
                'tags': result.get('fieldsOfStudy'),
                'authors': authors,
                'pdf_url': pdf_url,
                'publishedDate': publishedDate,
                'comments': comments,
            }
        )

    return results
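

# A minimal standalone sketch of the full round trip (hypothetical; SearXNG
# normally drives engines through its own search machinery, and `httpx` here
# merely stands in for searx.network):
#
#     import httpx
#
#     params = request('attention is all you need', {'pageno': 1})
#     resp = httpx.post(params['url'], content=params['data'], headers=params['headers'])
#     for paper in response(resp):
#         print(paper['title'], '->', paper['url'])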