From c7af43ab33669106fb7b4ffefa6881fca5cce346 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 10:29:52 +0000 Subject: [PATCH] feat: restart stopped app containers on server startup Adds restore_app_containers() which runs at startup alongside restore_caddy_routes(). For each app with a successful deploy it inspects the container state via `podman inspect` and runs `podman start` if the container is exited (e.g. after a host reboot). Missing containers are logged as warnings requiring a manual redeploy. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- server/src/main.rs | 6 ++++ server/src/routes/apps.rs | 67 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/server/src/main.rs b/server/src/main.rs index 152bfe8..6bb8821 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -162,6 +162,12 @@ async fn main() -> anyhow::Result<()> { routes::apps::restore_caddy_routes(&restore_db).await; }); + // Restart any app containers that are stopped (e.g. after a host reboot). + let containers_db = state.db.clone(); + tokio::spawn(async move { + routes::apps::restore_app_containers(&containers_db).await; + }); + // ── Protected routes (admin login required) ─────────────────────────────── let protected = Router::new() .route("/", get(routes::ui::index)) diff --git a/server/src/routes/apps.rs b/server/src/routes/apps.rs index 58f8345..869ca4f 100644 --- a/server/src/routes/apps.rs +++ b/server/src/routes/apps.rs @@ -76,6 +76,73 @@ pub async fn restore_caddy_routes(db: &crate::DbPool) { tracing::info!("restore_caddy_routes: registered {} app routes", apps.len()); } +/// On startup, ensure every app that had a successful deploy is actually running. +/// If the host rebooted, containers will be in "exited" state — start them. +/// If a container is missing entirely, log a warning (we don't rebuild automatically). 
+pub async fn restore_app_containers(db: &crate::DbPool) {
+    let apps = match sqlx::query_as::<_, crate::models::App>("SELECT * FROM apps")
+        .fetch_all(db)
+        .await
+    {
+        Ok(a) => a,
+        Err(e) => { tracing::error!("restore_app_containers: DB error: {}", e); return; }
+    };
+
+    for app in &apps {
+        // Only care about apps that have at least one successful deploy.
+        let has_deploy: bool = sqlx::query_scalar(
+            "SELECT COUNT(*) > 0 FROM deploys WHERE app_id = ? AND status = 'success'"
+        )
+        .bind(&app.id)
+        .fetch_one(db)
+        .await
+        .unwrap_or(false);
+
+        if !has_deploy {
+            continue;
+        }
+
+        let container = format!("hiy-{}", app.id);
+
+        // Check state; --type container keeps a same-named image from matching.
+        let inspect = tokio::process::Command::new("podman")
+            .args(["inspect", "--type", "container", "--format", "{{.State.Status}}", &container])
+            .output()
+            .await;
+
+        match inspect {
+            Ok(out) if out.status.success() => {
+                let status = String::from_utf8_lossy(&out.stdout).trim().to_string();
+                if status == "running" {
+                    tracing::debug!("restore_app_containers: {} already running", container);
+                } else {
+                    tracing::info!("restore_app_containers: starting {} (was {})", container, status);
+                    let start = tokio::process::Command::new("podman")
+                        .args(["start", &container])
+                        .output()
+                        .await;
+                    match start {
+                        Ok(o) if o.status.success() =>
+                            tracing::info!("restore_app_containers: {} started", container),
+                        Ok(o) =>
+                            tracing::warn!("restore_app_containers: failed to start {}: {}",
+                                container, String::from_utf8_lossy(&o.stderr).trim()),
+                        Err(e) =>
+                            tracing::warn!("restore_app_containers: error starting {}: {}", container, e),
+                    }
+                }
+            }
+            Ok(_) => tracing::warn!(
+                "restore_app_containers: container {} not found — redeploy needed",
+                container
+            ),
+            Err(e) => tracing::warn!(
+                "restore_app_containers: podman inspect {} failed: {}", container, e),
+        }
+    }
+    tracing::info!("restore_app_containers: done");
+}
+
 /// Push a visibility change to Caddy without requiring a full redeploy.
 /// Best-effort: logs a warning on failure but does not surface an error to the caller.
async fn push_visibility_to_caddy(app_id: &str, port: i64, is_public: bool) {