tineye.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""This engine implements *TinEye - reverse image search*.

Using TinEye, you can search by image or perform what TinEye calls a reverse
image search.  You can do that by uploading an image or searching by URL.  You
can also simply drag and drop your images to start your search.  TinEye
constantly crawls the web and adds images to its index.  Today, the TinEye
index is over 50.2 billion images `[tineye.com] <https://tineye.com/how>`_.

.. hint::

   This SearXNG engine only supports *'searching by URL'* and it does not use
   the official API `[api.tineye.com] <https://api.tineye.com/python/docs/>`_.

"""
from json import loads
from urllib.parse import urlencode
from datetime import datetime

about = {
    "website": 'https://tineye.com',
    "wikidata_id": 'Q2382535',
    "official_api_documentation": 'https://api.tineye.com/python/docs/',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'JSON',
}

categories = ['images']
paging = True
safesearch = False
base_url = 'https://tineye.com'
search_string = '/result_json/?page={page}&{query}'
def request(query, params):
    # build a request to the undocumented /result_json/ endpoint; the query is
    # expected to be the URL of the image to look up
    # (see https://github.com/TinEye/pytineye/blob/main/pytineye/api.py)
    params['url'] = base_url + search_string.format(
        query=urlencode({'url': query}),
        page=params['pageno'],
    )

    params['headers'].update(
        {
            'Connection': 'keep-alive',
            'Accept-Encoding': 'gzip, deflate, br',
            'Host': 'tineye.com',
            'DNT': '1',
            'TE': 'trailers',
        }
    )
    return params
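# For illustration only (not part of the engine): with a hypothetical query
# like 'https://example.org/image.jpg' and pageno 1, request() sets
# params['url'] to
#
#   https://tineye.com/result_json/?page=1&url=https%3A%2F%2Fexample.org%2Fimage.jpg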
def response(resp):
    results = []

    # parse the JSON response
    json_data = loads(resp.text)
    number_of_results = json_data['num_matches']

    for i in json_data['matches']:
        image_format = i['format']
        width = i['width']
        height = i['height']
        thumbnail_src = i['image_url']

        # only the first backlink of the first domain is reported
        backlink = i['domains'][0]['backlinks'][0]

        url = backlink['backlink']
        source = backlink['url']
        title = backlink['image_name']
        img_src = backlink['url']

        # get the published date: the trailing characters of crawl_date are
        # stripped so the value parses with datetime.fromisoformat()
        api_date = backlink['crawl_date'][:-3]
        publishedDate = datetime.fromisoformat(api_date)

        # append the image result
        results.append(
            {
                'template': 'images.html',
                'url': url,
                'thumbnail_src': thumbnail_src,
                'source': source,
                'title': title,
                'img_src': img_src,
                'format': image_format,
                'width': width,
                'height': height,
                'publishedDate': publishedDate,
            }
        )

    # append the total number of matches
    results.append({'number_of_results': number_of_results})

    return results
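# For reference, response() assumes a JSON shape roughly like the sketch below.
# The field names are those accessed by the parsing code above; all values are
# made up for illustration:
#
#   {
#     "num_matches": 42,
#     "matches": [
#       {
#         "format": "JPEG",
#         "width": 1024,
#         "height": 768,
#         "image_url": "https://example.org/thumb.jpg",
#         "domains": [
#           {
#             "backlinks": [
#               {
#                 "backlink": "https://example.org/page.html",
#                 "url": "https://example.org/image.jpg",
#                 "image_name": "image.jpg",
#                 "crawl_date": "2022-02-01T12:00:00.000000"
#               }
#             ]
#           }
#         ]
#       }
#     ]
#   }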