1from __future__ import annotations
2
3from typing import Any, Protocol
4
5from .checks_cumulative import (
6 check_index_bloat,
7 check_missing_index_candidates,
8 check_stats_freshness,
9 check_table_bloat,
10 check_unused_indexes,
11 check_vacuum_health,
12)
13from .checks_snapshot import check_blocking_queries, check_long_running_connections
14from .checks_structural import (
15 check_duplicate_indexes,
16 check_invalid_indexes,
17 check_missing_fk_indexes,
18 check_sequence_exhaustion,
19)
20from .context import gather_context
21from .types import CheckResult, CheckTier, TableOwner
22
23
class CheckFn(Protocol):
    """Structural type for every entry in ALL_CHECKS.

    A bare ``Callable[..., CheckResult]`` would cover the call signature
    but drop ``__name__``, which ``run_all_checks`` relies on to build a
    synthetic CheckResult when a check raises unexpectedly. Declaring a
    Protocol keeps both the signature and the attribute, and lets the
    tier registry below be typed against it without any casts.
    """

    # run_all_checks derives the fallback result's short name from this.
    __name__: str

    def __call__(
        self, cursor: Any, table_owners: dict[str, TableOwner]
    ) -> CheckResult: ...
39
40
# Registry of every check; run_all_checks executes these in list order,
# so the grouping comments below also describe execution order.
ALL_CHECKS: list[CheckFn] = [
    # Structural (always-real).
    check_invalid_indexes,
    check_duplicate_indexes,
    check_missing_fk_indexes,
    check_sequence_exhaustion,
    # Upstream-issue — fix first so operational checks are trustworthy.
    check_stats_freshness,
    # Operational (depends on cumulative stats).
    check_vacuum_health,
    check_table_bloat,
    check_index_bloat,
    check_unused_indexes,
    check_missing_index_candidates,
    # Point-in-time snapshot.
    check_long_running_connections,
    check_blocking_queries,
]
59
# Checks whose findings render as operational context, not alarms. On the
# success path the tier comes from the CheckResult the check itself
# returns; this set only drives the fallback CheckResult synthesized in
# run_all_checks when a check raises unexpectedly (warning-tier is the
# default for everything not listed here). Held as a frozenset of the
# callables themselves rather than a name→tier map, so renaming a check
# function forces a same-commit update here — no string drift, no type
# ignores.
_OPERATIONAL_CHECKS: frozenset[CheckFn] = frozenset(
    [
        check_stats_freshness,
        check_vacuum_health,
        check_table_bloat,
        check_index_bloat,
    ]
)
74
75
def _tier_for(check_fn: CheckFn) -> CheckTier:
    """Fallback tier for a crashed check: "operational" for members of
    _OPERATIONAL_CHECKS, "warning" otherwise."""
    if check_fn in _OPERATIONAL_CHECKS:
        return "operational"
    return "warning"
78
79
80def _apply_cross_check_caveats(results: list[CheckResult]) -> None:
81 """Annotate findings with context from other checks.
82
83 Some checks depend on data other checks produce — e.g. `unused_indexes`
84 relies on idx_scan counters but the planner's choice of index can be
85 skewed if ANALYZE hasn't run (stats_freshness) or if the table has heavy
86 bloat (vacuum_health). Without this pass, a user can't tell that an
87 "unused index" finding may be an artifact of another problem.
88
89 Each caveat is a short, plain-language string attached to the item in a
90 new `caveats` list. The CLI renders these dim, under the suggestion.
91 """
92 # Build tables → set of check names that flagged them.
93 flagged_by: dict[str, set[str]] = {}
94 for r in results:
95 for item in r["items"]:
96 if item["table"]:
97 flagged_by.setdefault(item["table"], set()).add(r["name"])
98
99 # affected_check -> list of (upstream_check, caveat_text)
100 CAVEATS: dict[str, list[tuple[str, str]]] = {
101 "unused_indexes": [
102 (
103 "stats_freshness",
104 "planner statistics on this table are absent or stale — the "
105 "planner may be picking sub-optimal plans that bypass this "
106 "index; re-check after ANALYZE",
107 ),
108 (
109 "vacuum_health",
110 "heavy dead-tuple bloat on this table may be skewing scan "
111 "counts — clear the bloat before deciding to drop",
112 ),
113 ],
114 "missing_index_candidates": [
115 (
116 "stats_freshness",
117 "this table's planner stats are absent or stale — the "
118 "sequential-scan evidence is still valid, but query plans "
119 "may shift after ANALYZE",
120 ),
121 ],
122 "vacuum_health": [
123 (
124 "table_bloat",
125 "this table also shows page-level bloat — VACUUM (ANALYZE) "
126 "reclaims dead tuples for reuse but does not shrink the "
127 "relation; see table_bloat for rewrite options",
128 ),
129 ],
130 "table_bloat": [
131 (
132 "vacuum_health",
133 "this table also has significant dead tuples — run "
134 "VACUUM (ANALYZE) first, then re-measure before deciding "
135 "whether a rewrite is still needed",
136 ),
137 ],
138 }
139
140 for r in results:
141 rules = CAVEATS.get(r["name"])
142 if not rules:
143 continue
144 for item in r["items"]:
145 table = item["table"]
146 if not table:
147 continue
148 flagged = flagged_by.get(table, set())
149 for upstream, text in rules:
150 if upstream in flagged:
151 item["caveats"].append(text)
152
153
def run_all_checks(
    cursor: Any, table_owners: dict[str, TableOwner]
) -> tuple[list[CheckResult], dict[str, Any]]:
    """Execute every registered check and gather shared context.

    Runs ALL_CHECKS in order against the given cursor. A check that
    raises unexpectedly must not abort the whole run, so its exception
    is converted into an error-status CheckResult (named after the
    function minus the ``check_`` prefix, tier chosen via _tier_for).
    Cross-check caveats are applied before returning.

    Returns a tuple of (results in registry order, context dict from
    gather_context).
    """
    outcomes: list[CheckResult] = []
    for fn in ALL_CHECKS:
        try:
            outcome = fn(cursor, table_owners)
        except Exception as exc:
            # Crash isolation: synthesize a renderable error result
            # instead of letting one broken check take down the report.
            short_name = fn.__name__.removeprefix("check_")
            outcome = CheckResult(
                name=short_name,
                label=short_name.replace("_", " ").title(),
                status="error",
                summary="error",
                items=[],
                message=str(exc),
                tier=_tier_for(fn),
            )
        outcomes.append(outcome)

    _apply_cross_check_caveats(outcomes)

    return outcomes, gather_context(cursor, table_owners)