# SPDX-License-Identifier: AGPL-3.0-or-later
"""BASE (Scholar publications)
"""

from datetime import datetime
import re
from urllib.parse import urlencode

from lxml import etree

from searx.utils import searx_useragent
  9. # about
  10. about = {
  11. "website": 'https://base-search.net',
  12. "wikidata_id": 'Q448335',
  13. "official_api_documentation": 'https://api.base-search.net/',
  14. "use_official_api": True,
  15. "require_api_key": False,
  16. "results": 'XML',
  17. }
  18. categories = ['science']
  19. base_url = (
  20. 'https://api.base-search.net/cgi-bin/BaseHttpSearchInterface.fcgi'
  21. + '?func=PerformSearch&{query}&boost=oa&hits={hits}&offset={offset}'
  22. )
  23. # engine dependent config
  24. paging = True
  25. number_of_results = 10
  26. # shortcuts for advanced search
  27. shorcut_dict = {
  28. # user-friendly keywords
  29. 'format:': 'dcformat:',
  30. 'author:': 'dccreator:',
  31. 'collection:': 'dccollection:',
  32. 'hdate:': 'dchdate:',
  33. 'contributor:': 'dccontributor:',
  34. 'coverage:': 'dccoverage:',
  35. 'date:': 'dcdate:',
  36. 'abstract:': 'dcdescription:',
  37. 'urls:': 'dcidentifier:',
  38. 'language:': 'dclanguage:',
  39. 'publisher:': 'dcpublisher:',
  40. 'relation:': 'dcrelation:',
  41. 'rights:': 'dcrights:',
  42. 'source:': 'dcsource:',
  43. 'subject:': 'dcsubject:',
  44. 'title:': 'dctitle:',
  45. 'type:': 'dcdctype:',
  46. }
  47. def request(query, params):
  48. # replace shortcuts with API advanced search keywords
  49. for key, val in shorcut_dict.items():
  50. query = re.sub(key, val, query)
  51. # basic search
  52. offset = (params['pageno'] - 1) * number_of_results
  53. string_args = {
  54. 'query': urlencode({'query': query}),
  55. 'offset': offset,
  56. 'hits': number_of_results,
  57. }
  58. params['url'] = base_url.format(**string_args)
  59. params['headers']['User-Agent'] = searx_useragent()
  60. return params
  61. def response(resp):
  62. results = []
  63. search_results = etree.XML(resp.content)
  64. for entry in search_results.xpath('./result/doc'):
  65. content = "No description available"
  66. date = datetime.now() # needed in case no dcdate is available for an item
  67. for item in entry:
  68. if item.attrib["name"] == "dcdate":
  69. date = item.text
  70. elif item.attrib["name"] == "dctitle":
  71. title = item.text
  72. elif item.attrib["name"] == "dclink":
  73. url = item.text
  74. elif item.attrib["name"] == "dcdescription":
  75. content = item.text[:300]
  76. if len(item.text) > 300:
  77. content += "..."
  78. # dates returned by the BASE API are not several formats
  79. publishedDate = None
  80. for date_format in ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d', '%Y-%m', '%Y']:
  81. try:
  82. publishedDate = datetime.strptime(date, date_format)
  83. break
  84. except: # pylint: disable=bare-except
  85. pass
  86. if publishedDate is not None:
  87. res_dict = {'url': url, 'title': title, 'publishedDate': publishedDate, 'content': content}
  88. else:
  89. res_dict = {'url': url, 'title': title, 'content': content}
  90. results.append(res_dict)
  91. return results