claude/heroku-clone-mvp-plan-NREhc #1

Merged
sander merged 42 commits from claude/heroku-clone-mvp-plan-NREhc into main 2026-03-29 07:24:40 +00:00
2 changed files with 73 additions and 0 deletions
Showing only changes of commit c7af43ab33 - Show all commits

View file

@ -162,6 +162,12 @@ async fn main() -> anyhow::Result<()> {
routes::apps::restore_caddy_routes(&restore_db).await;
});
// Restart any app containers that are stopped (e.g. after a host reboot).
let containers_db = state.db.clone();
tokio::spawn(async move {
routes::apps::restore_app_containers(&containers_db).await;
});
// ── Protected routes (admin login required) ───────────────────────────────
let protected = Router::new()
.route("/", get(routes::ui::index))

View file

@ -76,6 +76,73 @@ pub async fn restore_caddy_routes(db: &crate::DbPool) {
tracing::info!("restore_caddy_routes: registered {} app routes", apps.len());
}
/// On startup, ensure every app that had a successful deploy is actually running.
/// If the host rebooted, containers will be in "exited" state — start them.
/// If a container is missing entirely, log a warning (we don't rebuild automatically).
pub async fn restore_app_containers(db: &crate::DbPool) {
let apps = match sqlx::query_as::<_, crate::models::App>("SELECT * FROM apps")
.fetch_all(db)
.await
{
Ok(a) => a,
Err(e) => { tracing::error!("restore_app_containers: DB error: {}", e); return; }
};
for app in &apps {
// Only care about apps that have at least one successful deploy.
let has_deploy: bool = sqlx::query_scalar(
"SELECT COUNT(*) > 0 FROM deploys WHERE app_id = ? AND status = 'success'"
)
.bind(&app.id)
.fetch_one(db)
.await
.unwrap_or(false);
if !has_deploy {
continue;
}
let container = format!("hiy-{}", app.id);
// Check container state via `podman inspect`.
let inspect = tokio::process::Command::new("podman")
.args(["inspect", "--format", "{{.State.Status}}", &container])
.output()
.await;
match inspect {
Ok(out) if out.status.success() => {
let status = String::from_utf8_lossy(&out.stdout).trim().to_string();
if status == "running" {
tracing::debug!("restore_app_containers: {} already running", container);
} else {
tracing::info!("restore_app_containers: starting {} (was {})", container, status);
let start = tokio::process::Command::new("podman")
.args(["start", &container])
.output()
.await;
match start {
Ok(o) if o.status.success() =>
tracing::info!("restore_app_containers: {} started", container),
Ok(o) =>
tracing::warn!("restore_app_containers: failed to start {}: {}",
container, String::from_utf8_lossy(&o.stderr).trim()),
Err(e) =>
tracing::warn!("restore_app_containers: error starting {}: {}", container, e),
}
}
}
_ => {
tracing::warn!(
"restore_app_containers: container {} not found — redeploy needed",
container
);
}
}
}
tracing::info!("restore_app_containers: done");
}
/// Push a visibility change to Caddy without requiring a full redeploy.
/// Best-effort: logs a warning on failure but does not surface an error to the caller.
async fn push_visibility_to_caddy(app_id: &str, port: i64, is_public: bool) {