truenas-burnin/claude-sandbox/truenas-burnin/app/database.py
echoparkbaby 3e0000528f TrueNAS Burn-In Dashboard v0.9.0 — Live mode, thermal monitoring, adaptive concurrency
Go live against real TrueNAS SCALE 25.10:
- Remove mock-truenas dependency; mount SSH key as Docker secret
- Filter expired disk records from /api/v2.0/disk (expiretime field)
- Route all SMART operations through SSH (SCALE 25.10 removed REST smart/test endpoint)
- Poll drive temperatures via POST /api/v2.0/disk/temperatures (SCALE-specific)
- Store raw smartctl output in smart_tests.raw_output for proof of test execution
- Fix percent-remaining=0 false jump to 100% on test start
- Fix terminal WebSocket: add mounted key file fallback (/run/secrets/ssh_key)
- Fix WebSocket support: uvicorn → uvicorn[standard] (installs websockets)

HBA/system sensor temps on dashboard:
- SSH to TrueNAS and run sensors -j each poll cycle
- Parse coretemp (CPU package) and pch_* (PCH/chipset — storage I/O proxy)
- Render as compact chips in stats bar, color-coded green/yellow/red
- Live updates via new SSE system-sensors event every 12s

Adaptive concurrency signal:
- Thermal pressure indicator in stats bar: hidden when OK, WARM/HOT when running
  burn-in drives hit temp_warn_c / temp_crit_c thresholds
- Thermal gate in burn-in queue: jobs wait up to 3 min before acquiring semaphore
  slot if running drives are already at warning temp; times out and proceeds

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-27 06:33:36 -05:00

150 lines
5.4 KiB
Python

import aiosqlite
from pathlib import Path
from app.config import settings
SCHEMA = """
CREATE TABLE IF NOT EXISTS drives (
id INTEGER PRIMARY KEY AUTOINCREMENT,
truenas_disk_id TEXT UNIQUE NOT NULL,
devname TEXT NOT NULL,
serial TEXT,
model TEXT,
size_bytes INTEGER,
temperature_c INTEGER,
smart_health TEXT DEFAULT 'UNKNOWN',
last_seen_at TEXT NOT NULL,
last_polled_at TEXT NOT NULL,
notes TEXT,
location TEXT
);
CREATE TABLE IF NOT EXISTS smart_tests (
id INTEGER PRIMARY KEY AUTOINCREMENT,
drive_id INTEGER NOT NULL REFERENCES drives(id) ON DELETE CASCADE,
test_type TEXT NOT NULL CHECK(test_type IN ('short', 'long')),
state TEXT NOT NULL DEFAULT 'idle',
percent INTEGER DEFAULT 0,
truenas_job_id INTEGER,
started_at TEXT,
eta_at TEXT,
finished_at TEXT,
error_text TEXT,
UNIQUE(drive_id, test_type)
);
CREATE TABLE IF NOT EXISTS burnin_jobs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
drive_id INTEGER NOT NULL REFERENCES drives(id),
profile TEXT NOT NULL,
state TEXT NOT NULL DEFAULT 'queued',
percent INTEGER DEFAULT 0,
stage_name TEXT,
operator TEXT NOT NULL,
created_at TEXT NOT NULL,
started_at TEXT,
finished_at TEXT,
error_text TEXT
);
CREATE TABLE IF NOT EXISTS burnin_stages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
burnin_job_id INTEGER NOT NULL REFERENCES burnin_jobs(id) ON DELETE CASCADE,
stage_name TEXT NOT NULL,
state TEXT NOT NULL DEFAULT 'pending',
percent INTEGER DEFAULT 0,
started_at TEXT,
finished_at TEXT,
duration_seconds REAL,
error_text TEXT
);
CREATE TABLE IF NOT EXISTS audit_events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
event_type TEXT NOT NULL,
drive_id INTEGER REFERENCES drives(id),
burnin_job_id INTEGER REFERENCES burnin_jobs(id),
operator TEXT,
message TEXT NOT NULL,
created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now'))
);
CREATE INDEX IF NOT EXISTS idx_smart_drive_type ON smart_tests(drive_id, test_type);
CREATE INDEX IF NOT EXISTS idx_burnin_jobs_drive ON burnin_jobs(drive_id, state);
CREATE INDEX IF NOT EXISTS idx_burnin_stages_job ON burnin_stages(burnin_job_id);
CREATE INDEX IF NOT EXISTS idx_audit_events_job ON audit_events(burnin_job_id);
"""
# Migrations for existing databases that predate schema additions.
# Each entry is tried with try/except — SQLite raises OperationalError
# ("duplicate column name") if the column already exists, which is safe to ignore.
_MIGRATIONS = [
"ALTER TABLE drives ADD COLUMN notes TEXT",
"ALTER TABLE drives ADD COLUMN location TEXT",
# Stage 7: SSH command output + SMART attribute storage
"ALTER TABLE burnin_stages ADD COLUMN log_text TEXT",
"ALTER TABLE burnin_stages ADD COLUMN bad_blocks INTEGER DEFAULT 0",
"ALTER TABLE drives ADD COLUMN smart_attrs TEXT",
"ALTER TABLE smart_tests ADD COLUMN raw_output TEXT",
# Stage 8: track last reset time so dashboard burn-in col clears after reset
"ALTER TABLE drives ADD COLUMN last_reset_at TEXT",
]
async def _run_migrations(db: aiosqlite.Connection) -> None:
for sql in _MIGRATIONS:
try:
await db.execute(sql)
except Exception:
pass # Column already exists — harmless
# Remove the old CHECK(profile IN ('quick','full')) constraint if present.
# SQLite can't ALTER a CHECK — requires a full table rebuild.
cur = await db.execute(
"SELECT sql FROM sqlite_master WHERE type='table' AND name='burnin_jobs'"
)
row = await cur.fetchone()
if row and "CHECK" in (row[0] or ""):
await db.executescript("""
PRAGMA foreign_keys=OFF;
CREATE TABLE burnin_jobs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
drive_id INTEGER NOT NULL REFERENCES drives(id),
profile TEXT NOT NULL,
state TEXT NOT NULL DEFAULT 'queued',
percent INTEGER DEFAULT 0,
stage_name TEXT,
operator TEXT NOT NULL,
created_at TEXT NOT NULL,
started_at TEXT,
finished_at TEXT,
error_text TEXT
);
INSERT INTO burnin_jobs_new SELECT * FROM burnin_jobs;
DROP TABLE burnin_jobs;
ALTER TABLE burnin_jobs_new RENAME TO burnin_jobs;
CREATE INDEX IF NOT EXISTS idx_burnin_jobs_drive ON burnin_jobs(drive_id, state);
PRAGMA foreign_keys=ON;
""")
async def init_db() -> None:
Path(settings.db_path).parent.mkdir(parents=True, exist_ok=True)
async with aiosqlite.connect(settings.db_path) as db:
await db.execute("PRAGMA journal_mode=WAL")
await db.execute("PRAGMA foreign_keys=ON")
await db.executescript(SCHEMA)
await _run_migrations(db)
await db.commit()
async def get_db():
db = await aiosqlite.connect(settings.db_path)
db.row_factory = aiosqlite.Row
try:
await db.execute("PRAGMA journal_mode=WAL")
await db.execute("PRAGMA foreign_keys=ON")
yield db
finally:
await db.close()