tracker_patterns.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""Simple implementation to store TrackerPatterns data in a SQL database."""

from __future__ import annotations

__all__ = ["TrackerPatternsDB"]

import re
import typing
from collections.abc import Iterator
from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

from httpx import HTTPError

from searx.data.core import get_cache, log
from searx.network import get as http_get

RuleType = tuple[str, list[str], list[str]]


class TrackerPatternsDB:
    # pylint: disable=missing-class-docstring

    ctx_name = "data_tracker_patterns"

    CLEAR_LIST_URL = [
        # ClearURL rule lists, the first one that responds HTTP 200 is used
        "https://rules1.clearurls.xyz/data.minify.json",
        "https://rules2.clearurls.xyz/data.minify.json",
        "https://raw.githubusercontent.com/ClearURLs/Rules/refs/heads/master/data.min.json",
    ]

    class Fields:
        # pylint: disable=too-few-public-methods, invalid-name

        url_regexp: typing.Final = 0  # URL (regular expression) match condition of the link
        url_ignore: typing.Final = 1  # URL (regular expression) to ignore
        del_args: typing.Final = 2  # list of URL arguments (regular expression) to delete
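
    # For illustration, a rule tuple in the layout above might look like this
    # sketch (hypothetical values, not taken from an actual ClearURLs list):
    #
    #   rule: RuleType = (
    #       r"https?://(?:www\.)?example\.com",         # url_regexp
    #       [r"https?://(?:www\.)?example\.com/keep"],  # url_ignore
    #       [r"utm_.*", r"fbclid"],                     # del_args
    #   )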

    def __init__(self):
        self.cache = get_cache()

    def init(self):
        if self.cache.properties("tracker_patterns loaded") != "OK":
            # To avoid parallel initializations, the property is set first
            self.cache.properties.set("tracker_patterns loaded", "OK")
            self.load()
        # F I X M E:
        #     do we need maintenance here? .. remember: the database is stored
        #     in /tmp and will be rebuilt on reboot anyway

    def load(self):
        log.debug("init searx.data.TRACKER_PATTERNS")
        for rule in self.iter_clear_list():
            self.add(rule)

    def add(self, rule: RuleType):
        self.cache.set(
            key=rule[self.Fields.url_regexp],
            value=(
                rule[self.Fields.url_ignore],
                rule[self.Fields.del_args],
            ),
            ctx=self.ctx_name,
            expire=None,
        )

    def rules(self) -> Iterator[RuleType]:
        self.init()
        for key, value in self.cache.pairs(ctx=self.ctx_name):
            yield key, value[0], value[1]

    def iter_clear_list(self) -> Iterator[RuleType]:
        resp = None
        for url in self.CLEAR_LIST_URL:
            log.debug("TRACKER_PATTERNS: Trying to fetch %s...", url)
            try:
                resp = http_get(url, timeout=3)
            except HTTPError as exc:
                log.warning("TRACKER_PATTERNS: HTTPError (%s) occurred while fetching %s", exc, url)
                continue
            if resp.status_code != 200:
                log.warning(f"TRACKER_PATTERNS: ClearURL ignore HTTP {resp.status_code} {url}")
                resp = None  # this response is unusable, try the next URL in the list
                continue
            break

        if resp is None:
            log.error("TRACKER_PATTERNS: failed fetching ClearURL rule lists")
            return

        for rule in resp.json()["providers"].values():
            yield (
                rule["urlPattern"].replace("\\\\", "\\"),  # fix JavaScript regex syntax
                [exc.replace("\\\\", "\\") for exc in rule.get("exceptions", [])],
                rule.get("rules", []),
            )
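
    # For reference, the ClearURLs JSON consumed above maps provider names to
    # rule objects; a minimal sketch of one entry (hypothetical values):
    #
    #   {
    #     "providers": {
    #       "example": {
    #         "urlPattern": "https?:\\/\\/example\\.com",
    #         "exceptions": ["https?:\\/\\/example\\.com\\/keep"],
    #         "rules": ["utm_source", "ref"]
    #       }
    #     }
    #   }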

    def clean_url(self, url: str) -> bool | str:
        """The URL arguments are normalized and cleaned of tracker parameters.

        Returns bool ``True`` if the URL can be used unchanged.  If the URL
        should be modified, the returned string is the new URL to use.
        """
        new_url = url
        parsed_new_url = urlparse(url=new_url)

        for rule in self.rules():

            if not re.match(rule[self.Fields.url_regexp], new_url):
                # no match / ignore pattern
                continue

            do_ignore = False
            for pattern in rule[self.Fields.url_ignore]:
                if re.match(pattern, new_url):
                    do_ignore = True
                    break

            if do_ignore:
                # pattern is in the list of exceptions / ignore pattern
                # HINT:
                #     we can't break the outer rule loop since we have
                #     overlapping urlPattern like ".*"
                continue

            # remove tracker arguments from the url-query part
            query_args: list[tuple[str, str]] = list(parse_qsl(parsed_new_url.query))

            for name, val in query_args.copy():
                # remove URL arguments matching one of the del_args patterns
                for pattern in rule[self.Fields.del_args]:
                    if re.match(pattern, name):
                        log.debug("TRACKER_PATTERNS: %s remove tracker arg: %s='%s'", parsed_new_url.netloc, name, val)
                        query_args.remove((name, val))
                        break  # argument is already removed, a second match would re-remove it

            parsed_new_url = parsed_new_url._replace(query=urlencode(query_args))
            new_url = urlunparse(parsed_new_url)

        if new_url != url:
            return new_url
        return True


if __name__ == "__main__":
    db = TrackerPatternsDB()
    for r in db.rules():
        print(r)
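
    # Quick demo of clean_url(); the URL below is a hypothetical example and
    # the output depends on the rule lists fetched at runtime:
    print(db.clean_url("https://example.com/page?id=42&utm_source=newsletter"))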