flickr_noapi.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Flickr (Images)

Scrapes the ``modelExport`` JSON that Flickr embeds in its HTML search
page, so neither the official API nor an API key is required.
"""

import logging
import re
from json import loads
from time import time
from urllib.parse import urlencode

from searx.utils import ecma_unescape, html_to_text

# NOTE: searx's engine loader normally provides ``logger``; this stdlib
# fallback just keeps the module importable on its own.
logger = logging.getLogger(__name__)
# about
about = {
    "website": 'https://www.flickr.com',
    "wikidata_id": 'Q103204',
    "official_api_documentation": 'https://secure.flickr.com/services/api/flickr.photos.search.html',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}
categories = ['images']

url = 'https://www.flickr.com/'
search_url = url + 'search?{query}&page={page}'
time_range_url = '&min_upload_date={start}&max_upload_date={end}'
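# For example, query "cat" on page 2 with time_range "week" produces:
#   https://www.flickr.com/search?text=cat&page=2&min_upload_date=<now-7d>&max_upload_date=<now>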
photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
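# The search page embeds its result data in an inline script, on a line
# that looks roughly like ``modelExport: {...},``; the regex captures the
# JSON object on that line (re.M makes ^ and $ match per line).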
modelexport_re = re.compile(r"^\s*modelExport:\s*({.*}),$", re.M)
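# Flickr size suffixes, ordered from largest to smallest: 'o' is the
# original upload, the rest are fixed bounding boxes (e.g. 'b' = 1024 px,
# 'z' = 640 px, 'n' = 320 px on the longest side).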
image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
paging = True
time_range_support = True
time_range_dict = {'day': 60 * 60 * 24,
                   'week': 60 * 60 * 24 * 7,
                   'month': 60 * 60 * 24 * 7 * 4,
                   'year': 60 * 60 * 24 * 7 * 52}
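# NOTE: 'month' and 'year' are approximated as 4 and 52 weeks respectively.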


def build_flickr_url(user_id, photo_id):
    return photo_url.format(userid=user_id, photoid=photo_id)


def _get_time_range_url(time_range):
    if time_range in time_range_dict:
        # min_upload_date is the older bound, max_upload_date the newer one
        now = int(time())
        return time_range_url.format(start=now - time_range_dict[time_range], end=now)
    return ''


def request(query, params):
    params['url'] = (search_url.format(query=urlencode({'text': query}), page=params['pageno'])
                     + _get_time_range_url(params['time_range']))
    return params
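# searx calls request() with a pre-filled ``params`` dict (at least
# 'pageno' and 'time_range') and fetches whatever URL it sets; the HTTP
# response object is then handed to response() below.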


def response(resp):
    results = []

    matches = modelexport_re.search(resp.text)
    if matches is None:
        return results

    match = matches.group(1)
    model_export = loads(match)

    if 'legend' not in model_export:
        return results

    legend = model_export['legend']

    # handle empty page
    if not legend or not legend[0]:
        return results
    for index in legend:
        photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]

        author = ecma_unescape(photo.get('realname', ''))
        source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'
        title = ecma_unescape(photo.get('title', ''))
        content = html_to_text(ecma_unescape(photo.get('description', '')))
        img_src = None
        # pick the largest size that is available (image_sizes is ordered
        # from largest to smallest)
        for image_size in image_sizes:
            if image_size in photo['sizes']:
                img_src = photo['sizes'][image_size]['url']
                img_format = 'jpg {0}x{1}'.format(photo['sizes'][image_size]['width'],
                                                  photo['sizes'][image_size]['height'])
                break

        if not img_src:
            logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
            continue
        # prefer the small 'n' size (320 px) for the thumbnail, falling
        # back to 'z' (640 px) and finally to the full image
        if 'n' in photo['sizes']:
            thumbnail_src = photo['sizes']['n']['url']
        elif 'z' in photo['sizes']:
            thumbnail_src = photo['sizes']['z']['url']
        else:
            thumbnail_src = img_src
        if 'ownerNsid' not in photo:
            # should not happen, disowned photo? Show it anyway
            url = img_src
        else:
            url = build_flickr_url(photo['ownerNsid'], photo['id'])
        results.append({
            'url': url,
            'img_src': img_src,
            'thumbnail_src': thumbnail_src,
            'img_format': img_format,
            'template': 'images.html',
            # drop characters the default codec cannot encode (e.g. lone
            # surrogates left over from the scraped JSON)
            'author': author.encode(errors='ignore').decode(),
            'source': source.encode(errors='ignore').decode(),
            'title': title.encode(errors='ignore').decode(),
            'content': content.encode(errors='ignore').decode(),
        })

    return results
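

if __name__ == '__main__':
    # Ad-hoc smoke test, a minimal sketch only: it assumes the third-party
    # ``requests`` package, network access, and that Flickr still embeds
    # ``modelExport`` in its search page. searx itself drives the engine
    # through its own framework instead.
    import requests

    params = request('landscape', {'pageno': 1, 'time_range': None})
    resp = requests.get(params['url'], headers={'User-Agent': 'Mozilla/5.0'})
    for result in response(resp):
        print(result['img_format'], result['url'])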