truenas-burnin/app/database.py
Brandon Walter b73b5251ae Initial commit — TrueNAS Burn-In Dashboard v0.5.0
Full-stack burn-in orchestration dashboard (Stages 1–6d complete):
FastAPI backend, SQLite/WAL, SSE live dashboard, mock TrueNAS server,
SMTP/webhook notifications, batch burn-in, settings UI, audit log,
stats page, cancel SMART/burn-in, drag-to-reorder stages.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-24 00:08:29 -05:00

143 lines
5 KiB
Python

import sqlite3
from pathlib import Path

import aiosqlite

from app.config import settings
# Idempotent DDL executed on every startup (all statements use IF NOT EXISTS).
# Tables: drives (inventory synced from TrueNAS, keyed by truenas_disk_id),
# smart_tests (one row per drive per test type — enforced by the UNIQUE pair),
# burnin_jobs (one job per drive run), burnin_stages (per-stage progress rows,
# cascade-deleted with their job), audit_events (append-only log; created_at
# defaults to a UTC ISO-8601 timestamp generated by SQLite itself).
# Timestamps are stored as TEXT throughout; sizes in bytes; temps in Celsius.
SCHEMA = """
CREATE TABLE IF NOT EXISTS drives (
id INTEGER PRIMARY KEY AUTOINCREMENT,
truenas_disk_id TEXT UNIQUE NOT NULL,
devname TEXT NOT NULL,
serial TEXT,
model TEXT,
size_bytes INTEGER,
temperature_c INTEGER,
smart_health TEXT DEFAULT 'UNKNOWN',
last_seen_at TEXT NOT NULL,
last_polled_at TEXT NOT NULL,
notes TEXT,
location TEXT
);
CREATE TABLE IF NOT EXISTS smart_tests (
id INTEGER PRIMARY KEY AUTOINCREMENT,
drive_id INTEGER NOT NULL REFERENCES drives(id) ON DELETE CASCADE,
test_type TEXT NOT NULL CHECK(test_type IN ('short', 'long')),
state TEXT NOT NULL DEFAULT 'idle',
percent INTEGER DEFAULT 0,
truenas_job_id INTEGER,
started_at TEXT,
eta_at TEXT,
finished_at TEXT,
error_text TEXT,
UNIQUE(drive_id, test_type)
);
CREATE TABLE IF NOT EXISTS burnin_jobs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
drive_id INTEGER NOT NULL REFERENCES drives(id),
profile TEXT NOT NULL,
state TEXT NOT NULL DEFAULT 'queued',
percent INTEGER DEFAULT 0,
stage_name TEXT,
operator TEXT NOT NULL,
created_at TEXT NOT NULL,
started_at TEXT,
finished_at TEXT,
error_text TEXT
);
CREATE TABLE IF NOT EXISTS burnin_stages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
burnin_job_id INTEGER NOT NULL REFERENCES burnin_jobs(id) ON DELETE CASCADE,
stage_name TEXT NOT NULL,
state TEXT NOT NULL DEFAULT 'pending',
percent INTEGER DEFAULT 0,
started_at TEXT,
finished_at TEXT,
duration_seconds REAL,
error_text TEXT
);
CREATE TABLE IF NOT EXISTS audit_events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
event_type TEXT NOT NULL,
drive_id INTEGER REFERENCES drives(id),
burnin_job_id INTEGER REFERENCES burnin_jobs(id),
operator TEXT,
message TEXT NOT NULL,
created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now'))
);
CREATE INDEX IF NOT EXISTS idx_smart_drive_type ON smart_tests(drive_id, test_type);
CREATE INDEX IF NOT EXISTS idx_burnin_jobs_drive ON burnin_jobs(drive_id, state);
CREATE INDEX IF NOT EXISTS idx_burnin_stages_job ON burnin_stages(burnin_job_id);
CREATE INDEX IF NOT EXISTS idx_audit_events_job ON audit_events(burnin_job_id);
"""
# Migrations for existing databases that predate schema additions.
# These ALTERs are not idempotent on their own: SQLite raises OperationalError
# ("duplicate column name") if the column already exists, so _run_migrations
# treats that specific error as "already applied" and skips it.
# Append-only list — never reorder or remove entries once shipped.
_MIGRATIONS = [
"ALTER TABLE drives ADD COLUMN notes TEXT",
"ALTER TABLE drives ADD COLUMN location TEXT",
]
async def _run_migrations(db: aiosqlite.Connection) -> None:
    """Bring an existing database up to the current schema.

    Two phases:
      1. Additive column migrations from ``_MIGRATIONS``. Each ALTER is
         retried on every startup; the "duplicate column name" error means
         the migration already ran and is ignored. Any OTHER error (locked
         database, disk I/O, bad SQL) is re-raised instead of being silently
         swallowed, so real failures surface at startup.
      2. One-off rebuild of ``burnin_jobs`` to drop the legacy
         CHECK(profile IN ('quick','full')) constraint. SQLite cannot ALTER
         a CHECK, so the table is recreated and rows copied over.

    Caller is responsible for committing (see ``init_db``).
    """
    for sql in _MIGRATIONS:
        try:
            await db.execute(sql)
        except sqlite3.OperationalError as exc:
            # Only "duplicate column name" is expected/benign; anything
            # else is a genuine failure and must not be hidden.
            if "duplicate column" not in str(exc).lower():
                raise
    # Detect the legacy constraint by inspecting the stored CREATE TABLE SQL.
    # After a rebuild the new DDL contains no CHECK, so this runs at most once.
    cur = await db.execute(
        "SELECT sql FROM sqlite_master WHERE type='table' AND name='burnin_jobs'"
    )
    row = await cur.fetchone()
    if row and "CHECK" in (row[0] or ""):
        # Classic SQLite table-rebuild dance: new table, copy, drop, rename.
        # Foreign keys are disabled for the duration so the DROP doesn't
        # trip references from burnin_stages/audit_events.
        # NOTE: INSERT ... SELECT * relies on the old and new tables having
        # identical column order — true for this schema pair.
        await db.executescript("""
        PRAGMA foreign_keys=OFF;
        CREATE TABLE burnin_jobs_new (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        drive_id INTEGER NOT NULL REFERENCES drives(id),
        profile TEXT NOT NULL,
        state TEXT NOT NULL DEFAULT 'queued',
        percent INTEGER DEFAULT 0,
        stage_name TEXT,
        operator TEXT NOT NULL,
        created_at TEXT NOT NULL,
        started_at TEXT,
        finished_at TEXT,
        error_text TEXT
        );
        INSERT INTO burnin_jobs_new SELECT * FROM burnin_jobs;
        DROP TABLE burnin_jobs;
        ALTER TABLE burnin_jobs_new RENAME TO burnin_jobs;
        CREATE INDEX IF NOT EXISTS idx_burnin_jobs_drive ON burnin_jobs(drive_id, state);
        PRAGMA foreign_keys=ON;
        """)
async def init_db() -> None:
    """Create/upgrade the SQLite database at ``settings.db_path``.

    Ensures the parent directory exists, opens a short-lived connection,
    enables WAL journaling and foreign-key enforcement, applies the
    idempotent SCHEMA, runs pending migrations, and commits.
    """
    db_file = Path(settings.db_path)
    db_file.parent.mkdir(parents=True, exist_ok=True)
    async with aiosqlite.connect(settings.db_path) as conn:
        await conn.execute("PRAGMA journal_mode=WAL")
        await conn.execute("PRAGMA foreign_keys=ON")
        await conn.executescript(SCHEMA)
        await _run_migrations(conn)
        await conn.commit()
async def get_db():
    """FastAPI dependency: yield a per-request database connection.

    The connection uses ``aiosqlite.Row`` so results support name-based
    access, runs in WAL mode, and enforces foreign keys. It is always
    closed when the request finishes, even on error.
    """
    conn = await aiosqlite.connect(settings.db_path)
    conn.row_factory = aiosqlite.Row
    try:
        await conn.execute("PRAGMA journal_mode=WAL")
        await conn.execute("PRAGMA foreign_keys=ON")
        yield conn
    finally:
        # Guaranteed cleanup — the request handler may have raised.
        await conn.close()