flickr_noapi.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Flickr (Images)
"""
import re
from json import loads
from time import time
from urllib.parse import urlencode

from searx.utils import ecma_unescape, html_to_text
# about
about = {
    "website": 'https://www.flickr.com',
    "wikidata_id": 'Q103204',
    "official_api_documentation": 'https://secure.flickr.com/services/api/flickr.photos.search.html',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}
# engine dependent config
categories = ['images']
paging = True
time_range_support = True

url = 'https://www.flickr.com/'
search_url = url + 'search?{query}&page={page}'
time_range_url = '&min_upload_date={start}&max_upload_date={end}'
photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'

# regex that extracts the inline "modelExport: { ... }," JSON blob embedded
# in the JavaScript of Flickr's search result page
modelexport_re = re.compile(r"^\s*modelExport:\s*({.*}),$", re.M)

# Flickr size suffixes, ordered from largest to smallest
image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')

# time ranges in seconds
time_range_dict = {
    'day': 60 * 60 * 24,
    'week': 60 * 60 * 24 * 7,
    'month': 60 * 60 * 24 * 7 * 4,
    'year': 60 * 60 * 24 * 7 * 52,
}
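
# Illustration (hypothetical page excerpt): the search result page embeds a
# line such as
#
#     modelExport: {"legend": [...], "main": {...}},
#
# inside a <script> block; modelexport_re captures the JSON object between
# the braces so that response() can parse it with loads().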


def build_flickr_url(user_id, photo_id):
    return photo_url.format(userid=user_id, photoid=photo_id)
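

# Example (illustrative IDs): build_flickr_url('12345678@N00', '9876543210')
# returns 'https://www.flickr.com/photos/12345678@N00/9876543210'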


def _get_time_range_url(time_range):
    if time_range in time_range_dict:
        now = int(time())
        # min_upload_date is the older bound, max_upload_date is "now"
        return time_range_url.format(start=now - time_range_dict[time_range], end=now)
    return ''
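

# Example (illustrative timestamp): with int(time()) == 1700000000,
# _get_time_range_url('day') yields
# '&min_upload_date=1699913600&max_upload_date=1700000000',
# while any unsupported value yields ''.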


def request(query, params):
    params['url'] = search_url.format(
        query=urlencode({'text': query}), page=params['pageno']
    ) + _get_time_range_url(params['time_range'])
    return params
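

# Example (illustrative): for query='cat' on page 1 with no time range, the
# engine requests https://www.flickr.com/search?text=cat&page=1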


def response(resp):
    results = []

    matches = modelexport_re.search(resp.text)
    if matches is None:
        return results

    match = matches.group(1)
    model_export = loads(match)

    if 'legend' not in model_export:
        return results
    legend = model_export['legend']

    # handle empty page
    if not legend or not legend[0]:
        return results

    for index in legend:
        # each legend entry is a path of keys / list indices into 'main'
        photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]
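        # e.g. a hypothetical entry index == ['photo-models', '0', 'photos', '_data', '0']
        # resolves to model_export['main']['photo-models'][0]['photos']['_data'][0]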

        author = ecma_unescape(photo.get('realname', ''))
        source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'
        title = ecma_unescape(photo.get('title', ''))
        content = html_to_text(ecma_unescape(photo.get('description', '')))
        img_src = None

        # pick the first available size, from the largest format to the smallest
        for image_size in image_sizes:
            if image_size in photo['sizes']:
                img_src = photo['sizes'][image_size]['url']
                img_format = 'jpg {0}x{1}'.format(
                    photo['sizes'][image_size]['width'],
                    photo['sizes'][image_size]['height'],
                )
                break

        if not img_src:
            # 'logger' is injected into engine modules by the SearXNG engine loader
            logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
            continue

        # prefer the small 'n' size (320px) for the thumbnail, fall back to
        # 'z' (640px), then to the full-size image
        if 'n' in photo['sizes']:
            thumbnail_src = photo['sizes']['n']['url']
        elif 'z' in photo['sizes']:
            thumbnail_src = photo['sizes']['z']['url']
        else:
            thumbnail_src = img_src

        if 'ownerNsid' not in photo:
            # should not happen, disowned photo? Show it anyway
            url = img_src
        else:
            url = build_flickr_url(photo['ownerNsid'], photo['id'])

        result = {
            'url': url,
            'img_src': img_src,
            'thumbnail_src': thumbnail_src,
            'source': source,
            'img_format': img_format,
            'template': 'images.html',
        }

        # strip characters that cannot be UTF-8 encoded (e.g. lone surrogates
        # left over from ecma_unescape) before handing the strings over
        result['author'] = author.encode(errors='ignore').decode()
        result['source'] = source.encode(errors='ignore').decode()
        result['title'] = title.encode(errors='ignore').decode()
        result['content'] = content.encode(errors='ignore').decode()

        results.append(result)

    return results
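

if __name__ == '__main__':
    # Minimal smoke test (editorial sketch, not part of the upstream engine):
    # response() is fed a stub object mimicking the .text attribute of the
    # HTTP response SearXNG would normally pass in.
    class _FakeResp:  # hypothetical stand-in for the real response object
        text = 'modelExport: {"legend": [], "main": {}},'

    # an empty legend means an empty result list
    assert response(_FakeResp()) == []
    print('empty-page handling OK')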