public_domain_image_archive.py 3.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """Public domain image archive, based on the unsplash engine
  3. Meow meow
  4. """
  5. from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
  6. from json import dumps
# Public Algolia search-only key embedded in the PDIA frontend; not a secret.
algolia_api_key = "153d2a10ce67a0be5484de130a132050"
"""Algolia API key. See engine documentation """

# imgix query string appended to a cleaned image URL to request a 360px-boxed
# thumbnail rendition of the full image.
THUMBNAIL_SUFFIX = "?fit=max&h=360&w=360"
"""
Example thumbnail urls (from requests & html):

- https://the-public-domain-review.imgix.net
  /shop/nov-2023-prints-00043.jpg
  ?fit=max&h=360&w=360

- https://the-public-domain-review.imgix.net
  /collections/the-history-of-four-footed-beasts-and-serpents-1658/
  8616383182_5740fa7851_o.jpg
  ?fit=max&h=360&w=360

Example full image urls (from html)

- https://the-public-domain-review.imgix.net/shop/
  nov-2023-prints-00043.jpg
  ?fit=clip&w=970&h=800&auto=format,compress

- https://the-public-domain-review.imgix.net/collections/
  the-history-of-four-footed-beasts-and-serpents-1658/8616383182_5740fa7851_o.jpg
  ?fit=clip&w=310&h=800&auto=format,compress

The thumbnail url from the request will be cleaned for the full image link

The cleaned thumbnail url will have THUMBNAIL_SUFFIX added to them, based on the original thumbnail parameters
"""

# about
about = {
    "website": 'https://pdimagearchive.org',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'JSON',
}

# Algolia REST endpoint; application id and API key are passed as query
# parameters of the multi-query URL below.
base_url = 'https://oqi2j6v4iz-dsn.algolia.net/'
search_url = base_url + f'1/indexes/*/queries?x-algolia-api-key={algolia_api_key}&x-algolia-application-id=OQI2J6V4IZ'

# Engine traits consumed by the framework: result category, results per
# page (Algolia default), and pagination support.
categories = ['images']
page_size = 20
paging = True
  41. def clean_url(url):
  42. parsed = urlparse(url)
  43. query = [(k, v) for (k, v) in parse_qsl(parsed.query) if k not in ['ixid', 's']]
  44. return urlunparse((parsed.scheme, parsed.netloc, parsed.path, parsed.params, urlencode(query), parsed.fragment))
  45. def request(query, params):
  46. params['url'] = search_url
  47. params["method"] = "POST"
  48. request_params = {
  49. "page": params["pageno"] - 1,
  50. "query": query,
  51. "highlightPostTag": "__ais-highlight__",
  52. "highlightPreTag": "__ais-highlight__",
  53. }
  54. data = {
  55. "requests": [
  56. {"indexName": "prod_all-images", "params": urlencode(request_params)},
  57. ]
  58. }
  59. params["data"] = dumps(data)
  60. logger.debug("query_url --> %s", params['url'])
  61. return params
  62. def response(resp):
  63. results = []
  64. json_data = resp.json()
  65. if 'results' not in json_data:
  66. return []
  67. for result in json_data['results'][0]['hits']:
  68. content = []
  69. if "themes" in result:
  70. content.append("Themes: " + result['themes'])
  71. if "encompassingWork" in result:
  72. content.append("Encompassing work: " + result['encompassingWork'])
  73. content = "\n".join(content)
  74. base_image_url = result['thumbnail'].split("?")[0]
  75. results.append(
  76. {
  77. 'template': 'images.html',
  78. 'url': clean_url(f"{about['website']}/images/{result['objectID']}"),
  79. 'img_src': clean_url(base_image_url),
  80. 'thumbnail_src': clean_url(base_image_url + THUMBNAIL_SUFFIX),
  81. 'title': f"{result['title'].strip()} by {result['artist']} {result.get('displayYear', '')}",
  82. 'content': content,
  83. }
  84. )
  85. return results