# SPDX-License-Identifier: AGPL-3.0-or-later
"""Public domain image archive"""

from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
from json import dumps

from searx.network import get
from searx.utils import extr
from searx.exceptions import SearxEngineAccessDeniedException, SearxEngineException

THUMBNAIL_SUFFIX = "?fit=max&h=360&w=360"
"""
Example thumbnail urls (from requests & html):

- https://the-public-domain-review.imgix.net
  /shop/nov-2023-prints-00043.jpg
  ?fit=max&h=360&w=360
- https://the-public-domain-review.imgix.net
  /collections/the-history-of-four-footed-beasts-and-serpents-1658/
  8616383182_5740fa7851_o.jpg
  ?fit=max&h=360&w=360

Example full image urls (from html):

- https://the-public-domain-review.imgix.net/shop/
  nov-2023-prints-00043.jpg
  ?fit=clip&w=970&h=800&auto=format,compress
- https://the-public-domain-review.imgix.net/collections/
  the-history-of-four-footed-beasts-and-serpents-1658/8616383182_5740fa7851_o.jpg
  ?fit=clip&w=310&h=800&auto=format,compress

The thumbnail URL from the API response is stripped of its query string to
obtain the full image link; THUMBNAIL_SUFFIX (which mirrors the original
thumbnail parameters) is then appended to that cleaned URL to build the
thumbnail link.
"""
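
# A worked example of the URL flow described above, using the first sample URL
# from the docstring (illustrative only; the actual work happens in
# _clean_url() and response() further down):
#
#   thumbnail in API hit : .../shop/nov-2023-prints-00043.jpg?fit=max&h=360&w=360
#   strip query string   : .../shop/nov-2023-prints-00043.jpg            -> 'img_src'
#   append the suffix    : .../shop/nov-2023-prints-00043.jpg?fit=max&h=360&w=360
#                                                                        -> 'thumbnail_src'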

# about
about = {
    "website": 'https://pdimagearchive.org',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'JSON',
}

base_url = 'https://oqi2j6v4iz-dsn.algolia.net'
pdia_base_url = 'https://pdimagearchive.org'
pdia_search_url = pdia_base_url + '/search/?q='
pdia_config_start = "/_astro/InfiniteSearch."
pdia_config_end = ".js"
categories = ['images']
page_size = 20
paging = True

# the Algolia API key is scraped from the site's JS bundle and cached at
# module level; it is cleared again when a request is rejected with HTTP 403
__CACHED_API_KEY = None


def _clean_url(url):
    # drop the 'ixid' and 's' query parameters (likely tracking/signing
    # noise); everything else in the URL is kept as-is
    parsed = urlparse(url)
    query = [(k, v) for (k, v) in parse_qsl(parsed.query) if k not in ['ixid', 's']]

    return urlunparse((parsed.scheme, parsed.netloc, parsed.path, parsed.params, urlencode(query), parsed.fragment))
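
# A minimal sketch of what _clean_url does, with a hypothetical URL (the host
# and parameter values are made up; only 'ixid' and 's' are removed):
#
#   >>> _clean_url("https://example.imgix.net/a.jpg?w=360&ixid=abc&s=def")
#   'https://example.imgix.net/a.jpg?w=360'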


def _get_algolia_api_key():
    global __CACHED_API_KEY  # pylint:disable=global-statement

    if __CACHED_API_KEY:
        return __CACHED_API_KEY

    # the search page references a generated JS bundle; its name is needed to
    # locate the file that embeds the API key
    resp = get(pdia_search_url)
    if resp.status_code != 200:
        raise LookupError("Failed to fetch config location (and as such the API key) for PDImageArchive")

    pdia_config_filepart = extr(resp.text, pdia_config_start, pdia_config_end)
    pdia_config_url = pdia_base_url + pdia_config_start + pdia_config_filepart + pdia_config_end

    resp = get(pdia_config_url)
    if resp.status_code != 200:
        raise LookupError("Failed to obtain Algolia API key for PDImageArchive")

    api_key = extr(resp.text, 'const r="', '"', default=None)
    if api_key is None:
        raise LookupError("Couldn't obtain Algolia API key for PDImageArchive")

    __CACHED_API_KEY = api_key
    return api_key
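
# Judging by the extraction markers above, the fetched bundle is expected to
# contain a statement of roughly this shape (the key value here is invented):
#
#   const r="0123456789abcdef0123456789abcdef";
#
# If the site changes how the bundle inlines the key, extr() falls back to its
# default of None and the LookupError above fires.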


def _clear_cached_api_key():
    global __CACHED_API_KEY  # pylint:disable=global-statement

    __CACHED_API_KEY = None


def request(query, params):
    api_key = _get_algolia_api_key()
    args = {
        'x-algolia-api-key': api_key,
        'x-algolia-application-id': 'OQI2J6V4IZ',
    }
    params['url'] = f"{base_url}/1/indexes/*/queries?{urlencode(args)}"
    params["method"] = "POST"

    request_params = {
        # Algolia pages are zero-based, searx page numbers start at 1
        "page": params["pageno"] - 1,
        "query": query,
        "highlightPostTag": "__ais-highlight__",
        "highlightPreTag": "__ais-highlight__",
    }
    data = {
        "requests": [
            {"indexName": "prod_all-images", "params": urlencode(request_params)},
        ]
    }
    params["data"] = dumps(data)

    # http errors are handled manually to be able to reset the api key
    params['raise_for_httperror'] = False
    return params
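
# For the query "flowers" on page 1, request() produces a POST body along
# these lines (illustrative, reconstructed from the code above; shown wrapped
# here for readability):
#
#   {"requests": [{"indexName": "prod_all-images",
#                  "params": "page=0&query=flowers&highlightPostTag=__ais-highlight__&highlightPreTag=__ais-highlight__"}]}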


def response(resp):
    results = []
    json_data = resp.json()

    if resp.status_code == 403:
        # the scraped API key was rejected; drop it so the next request
        # fetches a fresh one
        _clear_cached_api_key()
        raise SearxEngineAccessDeniedException()

    if resp.status_code != 200:
        raise SearxEngineException()

    if 'results' not in json_data:
        return []

    for result in json_data['results'][0]['hits']:
        content = []

        if "themes" in result:
            content.append("Themes: " + result['themes'])

        if "encompassingWork" in result:
            content.append("Encompassing work: " + result['encompassingWork'])

        content = "\n".join(content)
        base_image_url = result['thumbnail'].split("?")[0]

        results.append(
            {
                'template': 'images.html',
                'url': _clean_url(f"{about['website']}/images/{result['objectID']}"),
                'img_src': _clean_url(base_image_url),
                'thumbnail_src': _clean_url(base_image_url + THUMBNAIL_SUFFIX),
                'title': f"{result['title'].strip()} by {result['artist']} {result.get('displayYear', '')}",
                'content': content,
            }
        )

    return results
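
# Shape of a single Algolia hit as consumed above (field values are invented;
# the field names and which of them are optional follow directly from the loop
# in response(), and the thumbnail URL is the sample one from the docstring):
#
#   {
#       "objectID": "abc123",
#       "title": "Nov 2023 Prints",
#       "artist": "Unknown",
#       "displayYear": "1658",                                     # optional
#       "themes": "animals",                                       # optional
#       "encompassingWork": "The History of Four-Footed Beasts",   # optional
#       "thumbnail": "https://the-public-domain-review.imgix.net/shop/nov-2023-prints-00043.jpg?fit=max&h=360&w=360",
#   }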