#!/usr/bin/env python
"""
BASE (Scholar publications)

@website     https://base-search.net
@provide-api yes with authorization (https://api.base-search.net/)

@using-api   yes
@results     XML
@stable      ?
@parse       url, title, publishedDate, content

More info on api: http://base-search.net/about/download/base_interface.pdf
"""
  12. from lxml import etree
  13. from urllib import urlencode
  14. from searx.utils import searx_useragent
  15. from cgi import escape
  16. from datetime import datetime
  17. import re
  18. categories = ['science']
  19. base_url = 'https://api.base-search.net/cgi-bin/BaseHttpSearchInterface.fcgi'\
  20. + '?func=PerformSearch&{query}&boost=oa&hits={hits}&offset={offset}'
  21. # engine dependent config
  22. paging = True
  23. number_of_results = 10
  24. # shortcuts for advanced search
  25. shorcut_dict = {
  26. # user-friendly keywords
  27. 'format:': 'dcformat:',
  28. 'author:': 'dccreator:',
  29. 'collection:': 'dccollection:',
  30. 'hdate:': 'dchdate:',
  31. 'contributor:': 'dccontributor:',
  32. 'coverage:': 'dccoverage:',
  33. 'date:': 'dcdate:',
  34. 'abstract:': 'dcdescription:',
  35. 'urls:': 'dcidentifier:',
  36. 'language:': 'dclanguage:',
  37. 'publisher:': 'dcpublisher:',
  38. 'relation:': 'dcrelation:',
  39. 'rights:': 'dcrights:',
  40. 'source:': 'dcsource:',
  41. 'subject:': 'dcsubject:',
  42. 'title:': 'dctitle:',
  43. 'type:': 'dcdctype:'
  44. }
  45. def request(query, params):
  46. # replace shortcuts with API advanced search keywords
  47. for key in shorcut_dict.keys():
  48. query = re.sub(str(key), str(shorcut_dict[key]), query)
  49. # basic search
  50. offset = (params['pageno'] - 1) * number_of_results
  51. string_args = dict(query=urlencode({'query': query}),
  52. offset=offset,
  53. hits=number_of_results)
  54. params['url'] = base_url.format(**string_args)
  55. params['headers']['User-Agent'] = searx_useragent()
  56. return params
  57. def response(resp):
  58. results = []
  59. search_results = etree.XML(resp.content)
  60. for entry in search_results.xpath('./result/doc'):
  61. content = "No description available"
  62. date = datetime.now() # needed in case no dcdate is available for an item
  63. for item in entry:
  64. if item.attrib["name"] == "dchdate":
  65. harvestDate = item.text
  66. elif item.attrib["name"] == "dcdate":
  67. date = item.text
  68. elif item.attrib["name"] == "dctitle":
  69. title = item.text
  70. elif item.attrib["name"] == "dclink":
  71. url = item.text
  72. elif item.attrib["name"] == "dcdescription":
  73. content = escape(item.text[:300])
  74. if len(item.text) > 300:
  75. content += "..."
  76. # dates returned by the BASE API are not several formats
  77. publishedDate = None
  78. for date_format in ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d', '%Y-%m', '%Y']:
  79. try:
  80. publishedDate = datetime.strptime(date, date_format)
  81. break
  82. except:
  83. pass
  84. if publishedDate is not None:
  85. res_dict = {'url': url,
  86. 'title': title,
  87. 'publishedDate': publishedDate,
  88. 'content': content}
  89. else:
  90. res_dict = {'url': url,
  91. 'title': title,
  92. 'content': content}
  93. results.append(res_dict)
  94. return results