From 48b9ccf152032eb5ac9c695140e3dae2d229bf15 Mon Sep 17 00:00:00 2001 From: Claude Date: Tue, 24 Mar 2026 15:06:34 +0000 Subject: [PATCH 01/41] =?UTF-8?q?feat:=20M4=20Hardening=20=E2=80=94=20encr?= =?UTF-8?q?yption,=20resource=20limits,=20monitoring,=20backups?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Env var encryption at rest (AES-256-GCM) - server/src/crypto.rs: new module — encrypt/decrypt with AES-256-GCM Key = SHA-256(HIY_SECRET_KEY); non-prefixed values pass through transparently for zero-downtime migration - Cargo.toml: aes-gcm = "0.10" - routes/envvars.rs: encrypt on SET; list returns masked values (••••) - routes/databases.rs: pg_password and DATABASE_URL stored encrypted - routes/ui.rs: decrypt pg_password when rendering DB card - builder.rs: decrypt env vars when writing the .env file for containers - .env.example: add HIY_SECRET_KEY entry ## Per-app resource limits - apps table: memory_limit (default 512m) + cpu_limit (default 0.5) added via idempotent ALTER TABLE in db.rs migration - models.rs: App, CreateApp, UpdateApp gain memory_limit + cpu_limit - routes/apps.rs: persist limits on create, update via PUT - builder.rs: pass MEMORY_LIMIT + CPU_LIMIT to build script - builder/build.sh: use $MEMORY_LIMIT / $CPU_LIMIT in podman run (replaces hardcoded --cpus="0.5"; --memory now also set) ## Monitoring (opt-in compose profile) - infra/docker-compose.yml: gatus + netdata under `monitoring` profile Enable: podman compose --profile monitoring up -d Gatus on :8080, Netdata on :19999 - infra/gatus.yml: Gatus config checking HIY /api/status every minute ## Backup cron job - infra/backup.sh: dumps SQLite, copies env files + git repos into a dated .tar.gz; optional rclone upload; 30-day local retention Suggested cron: 0 3 * * * /path/to/infra/backup.sh https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- Cargo.lock | 84 +++++++++++++++++++++++++++++++ builder/build.sh | 8 ++- docs/plan.md | 10 ++-- 
infra/backup.sh | 90 ++++++++++++++++++++++++++++++++++ infra/docker-compose.yml | 42 ++++++++++++++++ infra/gatus.yml | 39 +++++++++++++++ server/Cargo.toml | 1 + server/src/builder.rs | 15 ++++-- server/src/crypto.rs | 60 +++++++++++++++++++++++ server/src/db.rs | 6 +++ server/src/main.rs | 1 + server/src/models.rs | 6 +++ server/src/routes/apps.rs | 20 +++++++- server/src/routes/databases.rs | 32 +++++++----- server/src/routes/envvars.rs | 12 ++++- server/src/routes/ui.rs | 3 +- 16 files changed, 402 insertions(+), 27 deletions(-) create mode 100755 infra/backup.sh create mode 100644 infra/gatus.yml create mode 100644 server/src/crypto.rs diff --git a/Cargo.lock b/Cargo.lock index 8cd2016..4776597 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,41 @@ # It is not intended for manual editing. version = 4 +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + [[package]] name = "ahash" version = "0.8.12" @@ -341,9 +376,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", + "rand_core", "typenum", ] +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] + [[package]] name = "der" version = "0.7.10" @@ -609,6 +654,16 @@ dependencies = [ "wasip3", ] +[[package]] +name = "ghash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" +dependencies = [ + "opaque-debug", + "polyval", +] + [[package]] name = "hashbrown" version = "0.14.5" @@ -668,6 +723,7 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" name = "hiy-server" version = "0.1.0" dependencies = [ + "aes-gcm", "anyhow", "async-stream", "axum", @@ -1168,6 +1224,12 @@ version = "1.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" +[[package]] +name = "opaque-debug" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + [[package]] name = "parking_lot" version = "0.12.5" @@ -1257,6 +1319,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" +[[package]] +name = "polyval" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash", +] + [[package]] name = "potential_utf" version = "0.1.4" @@ -2192,6 +2266,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "untrusted" version = "0.9.0" diff --git a/builder/build.sh b/builder/build.sh index 63389b5..a779d11 100755 --- a/builder/build.sh +++ b/builder/build.sh @@ -2,8 +2,13 @@ # HIY Build Engine # Environment variables injected by hiy-server: # APP_ID, APP_NAME, REPO_URL, BRANCH, PORT, ENV_FILE, SHA, BUILD_DIR +# MEMORY_LIMIT (e.g. "512m"), CPU_LIMIT (e.g. "0.5") set -euo pipefail +# Defaults — overridden by per-app settings stored in the control plane. +MEMORY_LIMIT="${MEMORY_LIMIT:-512m}" +CPU_LIMIT="${CPU_LIMIT:-0.5}" + log() { echo "[hiy] $*"; } log "=== HostItYourself Build Engine ===" @@ -105,7 +110,8 @@ podman run --detach \ --label "hiy.app=${APP_ID}" \ --label "hiy.port=${PORT}" \ --restart unless-stopped \ - --cpus="0.5" \ + --memory="${MEMORY_LIMIT}" \ + --cpus="${CPU_LIMIT}" \ "$IMAGE_TAG" # ── 6. Update Caddy via its admin API ───────────────────────────────────────── diff --git a/docs/plan.md b/docs/plan.md index f2fde51..31c1adf 100644 --- a/docs/plan.md +++ b/docs/plan.md @@ -261,11 +261,11 @@ hostityourself/ - [ ] Deploy history ### M4 — Hardening (Week 5) -- [ ] Env var encryption at rest -- [ ] Resource limits on containers -- [ ] Netdata + Gatus setup -- [ ] Backup cron job -- [ ] Dashboard auth +- [x] Env var encryption at rest (AES-256-GCM via `HIY_SECRET_KEY`; transparent plaintext passthrough for migration) +- [x] Resource limits on containers (per-app `memory_limit` + `cpu_limit`; defaults 512m / 0.5 CPU) +- [x] Netdata + Gatus setup (`monitoring` compose profile; `infra/gatus.yml`) +- [x] Backup cron job (`infra/backup.sh` — SQLite dump + env files + git repos; local + rclone remote) +- [x] Dashboard auth (multi-user sessions, bcrypt, API keys — done in earlier milestone) ### M5 — Polish (Week 6) - [ ] Buildpack detection (Dockerfile / Node / Python / static) diff --git a/infra/backup.sh b/infra/backup.sh 
new file mode 100755 index 0000000..84e7f8e --- /dev/null +++ b/infra/backup.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# HIY daily backup script +# +# What is backed up: +# 1. SQLite database (hiy.db) — apps, deploys, env vars, users +# 2. Env files directory — decrypted env files written per deploy +# 3. Git repos — bare repos for git-push deploys +# +# Destination options (mutually exclusive; set one): +# HIY_BACKUP_DIR — local path (e.g. /mnt/usb/hiy-backups, default /tmp/hiy-backups) +# HIY_BACKUP_REMOTE — rclone remote:path (e.g. "b2:mybucket/hiy") +# requires rclone installed and configured +# +# Retention: 30 days (local only; remote retention is managed by the storage provider) +# +# Suggested cron (run as the same user as hiy-server): +# 0 3 * * * /path/to/infra/backup.sh >> /var/log/hiy-backup.log 2>&1 + +set -euo pipefail + +# ── Config ───────────────────────────────────────────────────────────────────── +HIY_DATA_DIR="${HIY_DATA_DIR:-/data}" +BACKUP_DIR="${HIY_BACKUP_DIR:-/tmp/hiy-backups}" +BACKUP_REMOTE="${HIY_BACKUP_REMOTE:-}" +RETAIN_DAYS="${HIY_BACKUP_RETAIN_DAYS:-30}" + +TIMESTAMP=$(date +%Y%m%d-%H%M%S) +ARCHIVE_NAME="hiy-backup-${TIMESTAMP}.tar.gz" +STAGING="${BACKUP_DIR}/staging-${TIMESTAMP}" + +log() { echo "[hiy-backup] $(date '+%H:%M:%S') $*"; } + +log "=== HIY Backup ===" +log "Data dir: ${HIY_DATA_DIR}" +log "Staging: ${STAGING}" + +# ── 1. Stage files ───────────────────────────────────────────────────────────── +mkdir -p "${STAGING}" + +# SQLite: use the .dump command to produce a portable SQL text dump. +if [ -f "${HIY_DATA_DIR}/hiy.db" ]; then + log "Dumping SQLite database…" + sqlite3 "${HIY_DATA_DIR}/hiy.db" .dump > "${STAGING}/hiy.sql" +else + log "WARNING: ${HIY_DATA_DIR}/hiy.db not found — skipping SQLite dump" +fi + +# Env files (contain decrypted secrets — handle with care). +if [ -d "${HIY_DATA_DIR}/envs" ]; then + log "Copying env files…" + cp -r "${HIY_DATA_DIR}/envs" "${STAGING}/envs" +fi + +# Bare git repos. 
+if [ -d "${HIY_DATA_DIR}/repos" ]; then + log "Copying git repos…" + cp -r "${HIY_DATA_DIR}/repos" "${STAGING}/repos" +fi + +# ── 2. Create archive ────────────────────────────────────────────────────────── +mkdir -p "${BACKUP_DIR}" +ARCHIVE_PATH="${BACKUP_DIR}/${ARCHIVE_NAME}" +log "Creating archive: ${ARCHIVE_PATH}" +tar -czf "${ARCHIVE_PATH}" -C "${STAGING}" . +rm -rf "${STAGING}" + +ARCHIVE_SIZE=$(du -sh "${ARCHIVE_PATH}" | cut -f1) +log "Archive size: ${ARCHIVE_SIZE}" + +# ── 3. Upload to remote (optional) ──────────────────────────────────────────── +if [ -n "${BACKUP_REMOTE}" ]; then + if command -v rclone &>/dev/null; then + log "Uploading to remote: ${BACKUP_REMOTE}" + rclone copy "${ARCHIVE_PATH}" "${BACKUP_REMOTE}/" + log "Upload complete." + else + log "WARNING: HIY_BACKUP_REMOTE is set but rclone is not installed — skipping upload" + log "Install rclone: https://rclone.org/install/" + fi +fi + +# ── 4. Rotate old local backups ──────────────────────────────────────────────── +log "Removing local backups older than ${RETAIN_DAYS} days…" +find "${BACKUP_DIR}" -maxdepth 1 -name 'hiy-backup-*.tar.gz' \ + -mtime "+${RETAIN_DAYS}" -delete + +REMAINING=$(find "${BACKUP_DIR}" -maxdepth 1 -name 'hiy-backup-*.tar.gz' | wc -l) +log "Local backups retained: ${REMAINING}" + +log "=== Backup complete: ${ARCHIVE_NAME} ===" diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 2773344..46ad412 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -94,6 +94,45 @@ services: - hiy-net - default + # ── Uptime / health checks ──────────────────────────────────────────────── + # Enable with: podman compose --profile monitoring up -d + gatus: + profiles: [monitoring] + image: twinproduction/gatus:latest + restart: unless-stopped + ports: + - "8080:8080" + volumes: + - ./gatus.yml:/config/config.yaml:ro + networks: + - hiy-net + + # ── Host metrics (rootful Podman / Docker only) ─────────────────────────── + # On rootless Podman some host 
mounts may be unavailable; comment out if so. + netdata: + profiles: [monitoring] + image: netdata/netdata:stable + restart: unless-stopped + ports: + - "19999:19999" + pid: host + cap_add: + - SYS_PTRACE + - SYS_ADMIN + security_opt: + - apparmor:unconfined + volumes: + - netdata-config:/etc/netdata + - netdata-lib:/var/lib/netdata + - netdata-cache:/var/cache/netdata + - /etc/os-release:/host/etc/os-release:ro + - /etc/passwd:/host/etc/passwd:ro + - /etc/group:/host/etc/group:ro + - /proc:/host/proc:ro + - /sys:/host/sys:ro + networks: + - hiy-net + networks: hiy-net: name: hiy-net @@ -105,3 +144,6 @@ volumes: caddy-data: caddy-config: hiy-pg-data: + netdata-config: + netdata-lib: + netdata-cache: diff --git a/infra/gatus.yml b/infra/gatus.yml new file mode 100644 index 0000000..00618c8 --- /dev/null +++ b/infra/gatus.yml @@ -0,0 +1,39 @@ +# Gatus uptime / health check configuration for HIY. +# Docs: https://github.com/TwiN/gatus + +web: + port: 8080 + +# In-memory storage — no persistence needed for uptime checks. +storage: + type: memory + +# Alert via email when an endpoint is down (optional — remove if not needed). 
+# alerting: +# email: +# from: gatus@yourdomain.com +# username: gatus@yourdomain.com +# password: ${EMAIL_PASSWORD} +# host: smtp.yourdomain.com +# port: 587 +# to: you@yourdomain.com + +endpoints: + - name: HIY Dashboard + url: http://server:3000/api/status + interval: 1m + conditions: + - "[STATUS] == 200" + alerts: + - type: email + description: HIY dashboard is unreachable + send-on-resolved: true + + # Add an entry per deployed app: + # + # - name: my-app + # url: http://my-app:3001/health + # interval: 1m + # conditions: + # - "[STATUS] == 200" + # - "[RESPONSE_TIME] < 500" diff --git a/server/Cargo.toml b/server/Cargo.toml index dfe1497..cc8d19f 100644 --- a/server/Cargo.toml +++ b/server/Cargo.toml @@ -24,6 +24,7 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] } dotenvy = "0.15" async-stream = "0.3" bcrypt = "0.15" +aes-gcm = "0.10" anyhow = "1" futures = "0.3" base64 = "0.22" diff --git a/server/src/builder.rs b/server/src/builder.rs index 48aab28..ea1c25c 100644 --- a/server/src/builder.rs +++ b/server/src/builder.rs @@ -81,10 +81,15 @@ async fn run_build(state: &AppState, deploy_id: &str) -> anyhow::Result<()> { .fetch_all(&state.db) .await?; - let env_content: String = env_vars - .iter() - .map(|e| format!("{}={}\n", e.key, e.value)) - .collect(); + let mut env_content = String::new(); + for e in &env_vars { + let plain = crate::crypto::decrypt(&e.value) + .unwrap_or_else(|err| { + tracing::warn!("Could not decrypt env var {}: {} — using raw value", e.key, err); + e.value.clone() + }); + env_content.push_str(&format!("{}={}\n", e.key, plain)); + } std::fs::write(&env_file, env_content)?; // Mark as building. 
@@ -138,6 +143,8 @@ async fn run_build(state: &AppState, deploy_id: &str) -> anyhow::Result<()> { .env("ENV_FILE", &env_file) .env("SHA", deploy.sha.as_deref().unwrap_or("")) .env("BUILD_DIR", &build_dir) + .env("MEMORY_LIMIT", &app.memory_limit) + .env("CPU_LIMIT", &app.cpu_limit) .env("DOMAIN_SUFFIX", &domain_suffix) .env("CADDY_API_URL", &caddy_api_url) .stdout(std::process::Stdio::piped()) diff --git a/server/src/crypto.rs b/server/src/crypto.rs new file mode 100644 index 0000000..12d0f2e --- /dev/null +++ b/server/src/crypto.rs @@ -0,0 +1,60 @@ +/// AES-256-GCM envelope encryption for values stored at rest. +/// +/// Encrypted blobs are prefixed with `enc:v1:` so plaintext values written +/// before encryption was enabled are transparently passed through on decrypt. +/// +/// Key derivation: SHA-256 of `HIY_SECRET_KEY` env var. If the var is +/// absent a hard-coded default is used and a warning is logged once. +use aes_gcm::{ + aead::{Aead, AeadCore, KeyInit, OsRng}, + Aes256Gcm, Key, Nonce, +}; +use base64::{engine::general_purpose::STANDARD, Engine}; +use sha2::{Digest, Sha256}; + +const PREFIX: &str = "enc:v1:"; + +fn key_bytes() -> [u8; 32] { + let secret = std::env::var("HIY_SECRET_KEY").unwrap_or_else(|_| { + tracing::warn!( + "HIY_SECRET_KEY is not set — env vars are encrypted with the default insecure key. \ + Set HIY_SECRET_KEY in .env to a random 32+ char string." + ); + "hiy-default-insecure-key-please-change-me".into() + }); + Sha256::digest(secret.as_bytes()).into() +} + +/// Encrypt a plaintext value and return `enc:v1:`. 
+pub fn encrypt(plaintext: &str) -> anyhow::Result { + let kb = key_bytes(); + let cipher = Aes256Gcm::new(Key::::from_slice(&kb)); + let nonce = Aes256Gcm::generate_nonce(&mut OsRng); + let ct = cipher + .encrypt(&nonce, plaintext.as_bytes()) + .map_err(|e| anyhow::anyhow!("encrypt: {}", e))?; + let mut blob = nonce.to_vec(); + blob.extend_from_slice(&ct); + Ok(format!("{}{}", PREFIX, STANDARD.encode(&blob))) +} + +/// Decrypt an `enc:v1:…` value. Non-prefixed strings are returned as-is +/// (transparent migration path for pre-encryption data). +pub fn decrypt(value: &str) -> anyhow::Result { + if !value.starts_with(PREFIX) { + return Ok(value.to_string()); + } + let blob = STANDARD + .decode(&value[PREFIX.len()..]) + .map_err(|e| anyhow::anyhow!("base64: {}", e))?; + if blob.len() < 12 { + anyhow::bail!("ciphertext too short"); + } + let (nonce_bytes, ct) = blob.split_at(12); + let kb = key_bytes(); + let cipher = Aes256Gcm::new(Key::::from_slice(&kb)); + let plain = cipher + .decrypt(Nonce::from_slice(nonce_bytes), ct) + .map_err(|e| anyhow::anyhow!("decrypt: {}", e))?; + String::from_utf8(plain).map_err(Into::into) +} diff --git a/server/src/db.rs b/server/src/db.rs index a2eebe1..7d4ce46 100644 --- a/server/src/db.rs +++ b/server/src/db.rs @@ -101,6 +101,12 @@ pub async fn migrate(pool: &DbPool) -> anyhow::Result<()> { .execute(pool) .await?; + // Idempotent column additions for existing databases (SQLite ignores "column exists" errors). 
+ let _ = sqlx::query("ALTER TABLE apps ADD COLUMN memory_limit TEXT NOT NULL DEFAULT '512m'") + .execute(pool).await; + let _ = sqlx::query("ALTER TABLE apps ADD COLUMN cpu_limit TEXT NOT NULL DEFAULT '0.5'") + .execute(pool).await; + sqlx::query( r#"CREATE TABLE IF NOT EXISTS databases ( app_id TEXT PRIMARY KEY REFERENCES apps(id) ON DELETE CASCADE, diff --git a/server/src/main.rs b/server/src/main.rs index 4235a8e..ad3eee9 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -10,6 +10,7 @@ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; mod auth; mod builder; +mod crypto; mod db; mod models; mod routes; diff --git a/server/src/models.rs b/server/src/models.rs index a2444a6..39b9efe 100644 --- a/server/src/models.rs +++ b/server/src/models.rs @@ -8,6 +8,8 @@ pub struct App { pub branch: String, pub port: i64, pub webhook_secret: String, + pub memory_limit: String, + pub cpu_limit: String, pub created_at: String, pub updated_at: String, } @@ -18,6 +20,8 @@ pub struct CreateApp { pub repo_url: Option, pub branch: Option, pub port: i64, + pub memory_limit: Option, + pub cpu_limit: Option, } #[derive(Debug, Deserialize)] @@ -25,6 +29,8 @@ pub struct UpdateApp { pub repo_url: Option, pub branch: Option, pub port: Option, + pub memory_limit: Option, + pub cpu_limit: Option, } #[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] diff --git a/server/src/routes/apps.rs b/server/src/routes/apps.rs index e5344a5..3c161ca 100644 --- a/server/src/routes/apps.rs +++ b/server/src/routes/apps.rs @@ -29,10 +29,12 @@ pub async fn create( let now = Utc::now().to_rfc3339(); let branch = payload.branch.unwrap_or_else(|| "main".into()); let secret = Uuid::new_v4().to_string().replace('-', ""); + let memory_limit = payload.memory_limit.unwrap_or_else(|| "512m".into()); + let cpu_limit = payload.cpu_limit.unwrap_or_else(|| "0.5".into()); sqlx::query( - "INSERT INTO apps (id, name, repo_url, branch, port, webhook_secret, created_at, updated_at) - 
VALUES (?, ?, ?, ?, ?, ?, ?, ?)", + "INSERT INTO apps (id, name, repo_url, branch, port, webhook_secret, memory_limit, cpu_limit, created_at, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", ) .bind(&id) .bind(&payload.name) @@ -40,6 +42,8 @@ pub async fn create( .bind(&branch) .bind(payload.port) .bind(&secret) + .bind(&memory_limit) + .bind(&cpu_limit) .bind(&now) .bind(&now) .execute(&s.db) @@ -89,6 +93,18 @@ pub async fn update( .execute(&s.db).await .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; } + if let Some(v) = payload.memory_limit { + sqlx::query("UPDATE apps SET memory_limit = ?, updated_at = ? WHERE id = ?") + .bind(v).bind(&now).bind(&id) + .execute(&s.db).await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + } + if let Some(v) = payload.cpu_limit { + sqlx::query("UPDATE apps SET cpu_limit = ?, updated_at = ? WHERE id = ?") + .bind(v).bind(&now).bind(&id) + .execute(&s.db).await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + } fetch_app(&s, &id).await.map(Json) } diff --git a/server/src/routes/databases.rs b/server/src/routes/databases.rs index 37db709..33f6fbc 100644 --- a/server/src/routes/databases.rs +++ b/server/src/routes/databases.rs @@ -5,7 +5,7 @@ use axum::{ }; use serde_json::json; -use crate::{models::Database, AppState}; +use crate::{crypto, models::Database, AppState}; type ApiError = (StatusCode, String); @@ -39,13 +39,17 @@ pub async fn get_db( match db { None => Err(err(StatusCode::NOT_FOUND, "No database provisioned")), - Some(d) => Ok(Json(json!({ - "app_id": d.app_id, - "schema": d.app_id, - "pg_user": d.pg_user, - "conn_str": conn_str(&d.pg_user, &d.pg_password), - "created_at": d.created_at, - }))), + Some(d) => { + let pw = crypto::decrypt(&d.pg_password) + .map_err(|e| err(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + Ok(Json(json!({ + "app_id": d.app_id, + "schema": d.app_id, + "pg_user": d.pg_user, + "conn_str": conn_str(&d.pg_user, &pw), + "created_at": d.created_at, + }))) + } } } @@ -107,27 
+111,31 @@ pub async fn provision( .execute(pg).await .map_err(|e| err(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; - // Persist credentials. + // Persist credentials (password encrypted at rest). + let enc_password = crypto::encrypt(&password) + .map_err(|e| err(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; let now = chrono::Utc::now().to_rfc3339(); sqlx::query( "INSERT INTO databases (app_id, pg_user, pg_password, created_at) VALUES (?, ?, ?, ?)", ) .bind(&app_id) .bind(&pg_user) - .bind(&password) + .bind(&enc_password) .bind(&now) .execute(&s.db) .await .map_err(|e| err(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; - // Inject DATABASE_URL as an app env var (picked up on next deploy). + // Inject DATABASE_URL as an encrypted app env var (picked up on next deploy). let url = conn_str(&pg_user, &password); + let enc_url = crypto::encrypt(&url) + .map_err(|e| err(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; sqlx::query( "INSERT INTO env_vars (app_id, key, value) VALUES (?, 'DATABASE_URL', ?) ON CONFLICT (app_id, key) DO UPDATE SET value = excluded.value", ) .bind(&app_id) - .bind(&url) + .bind(&enc_url) .execute(&s.db) .await .map_err(|e| err(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; diff --git a/server/src/routes/envvars.rs b/server/src/routes/envvars.rs index 4ef00fd..6ae8f0a 100644 --- a/server/src/routes/envvars.rs +++ b/server/src/routes/envvars.rs @@ -5,6 +5,7 @@ use axum::{ }; use crate::{ + crypto, models::{EnvVar, SetEnvVar}, AppState, }; @@ -20,7 +21,12 @@ pub async fn list( .fetch_all(&s.db) .await .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; - Ok(Json(vars)) + // Return keys only; values are masked in the UI and never sent in plaintext. 
+ let masked: Vec = vars + .into_iter() + .map(|e| EnvVar { value: "••••••••".into(), ..e }) + .collect(); + Ok(Json(masked)) } pub async fn set( @@ -28,13 +34,15 @@ pub async fn set( Path(app_id): Path, Json(payload): Json, ) -> Result { + let encrypted = crypto::encrypt(&payload.value) + .map_err(|e| { tracing::error!("encrypt env var: {}", e); StatusCode::INTERNAL_SERVER_ERROR })?; sqlx::query( "INSERT INTO env_vars (app_id, key, value) VALUES (?, ?, ?) ON CONFLICT(app_id, key) DO UPDATE SET value = excluded.value", ) .bind(&app_id) .bind(&payload.key) - .bind(&payload.value) + .bind(&encrypted) .execute(&s.db) .await .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; diff --git a/server/src/routes/ui.rs b/server/src/routes/ui.rs index 875e13a..9e51a6f 100644 --- a/server/src/routes/ui.rs +++ b/server/src/routes/ui.rs @@ -278,7 +278,8 @@ pub async fn app_detail( "# .to_string(), (true, Some(db)) => { - let url = format!("postgres://{}:{}@postgres:5432/hiy", db.pg_user, db.pg_password); + let pw = crate::crypto::decrypt(&db.pg_password).unwrap_or_default(); + let url = format!("postgres://{}:{}@postgres:5432/hiy", db.pg_user, pw); format!(r#"

Database

From b23e02f2d23f2a3afb96b5fa5c5c11a34846f86d Mon Sep 17 00:00:00 2001 From: Claude Date: Tue, 24 Mar 2026 16:18:46 +0000 Subject: [PATCH 02/41] fix: declare default network for podman-compose compatibility podman-compose requires all networks referenced in service configs to be explicitly declared in the top-level networks block. Docker Compose creates the default network implicitly, but podman-compose errors with 'missing networks: default'. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 46ad412..032cbe6 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -138,6 +138,7 @@ networks: name: hiy-net # External so deployed app containers can join it. external: false + default: {} volumes: hiy-data: From f50492f13226b2f99f0f38a937a64530fa92a7e9 Mon Sep 17 00:00:00 2001 From: Claude Date: Tue, 24 Mar 2026 16:20:22 +0000 Subject: [PATCH 03/41] fix: fully-qualify all image names for Podman without search registries Podman without unqualified-search registries configured in /etc/containers/registries.conf refuses to resolve short image names. Prefix every image with docker.io/library/ (official images) or docker.io// (third-party) so pulls succeed unconditionally. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/Dockerfile.server | 4 ++-- infra/docker-compose.yml | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/infra/Dockerfile.server b/infra/Dockerfile.server index 13d8fdf..236ab0b 100644 --- a/infra/Dockerfile.server +++ b/infra/Dockerfile.server @@ -1,7 +1,7 @@ # syntax=docker/dockerfile:1 # ── Build stage ─────────────────────────────────────────────────────────────── # Run the compiler on the *build* host; cross-compile to target when needed. 
-FROM --platform=$BUILDPLATFORM rust:1.94-slim-bookworm AS builder +FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.94-slim-bookworm AS builder ARG BUILDPLATFORM ARG TARGETPLATFORM @@ -60,7 +60,7 @@ RUN TARGET=$(cat /rust_target) && \ RUN cp /build/target/"$(cat /rust_target)"/release/hiy-server /usr/local/bin/hiy-server # ── Runtime stage ───────────────────────────────────────────────────────────── -FROM debian:bookworm-slim +FROM docker.io/library/debian:bookworm-slim RUN apt-get update && apt-get install -y \ ca-certificates \ diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 032cbe6..33a0e52 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -12,7 +12,7 @@ services: # rootful: /run/podman/podman.sock # rootless: /run/user//podman/podman.sock (start.sh sets this) podman-proxy: - image: alpine/socat + image: docker.io/alpine/socat command: tcp-listen:2375,fork,reuseaddr unix-connect:/podman.sock restart: unless-stopped volumes: @@ -62,7 +62,7 @@ services: # ── Shared Postgres ─────────────────────────────────────────────────────── postgres: - image: postgres:16-alpine + image: docker.io/library/postgres:16-alpine restart: unless-stopped environment: POSTGRES_DB: hiy @@ -75,7 +75,7 @@ services: # ── Reverse proxy ───────────────────────────────────────────────────────── caddy: - image: caddy:2-alpine + image: docker.io/library/caddy:2-alpine restart: unless-stopped ports: - "80:80" @@ -98,7 +98,7 @@ services: # Enable with: podman compose --profile monitoring up -d gatus: profiles: [monitoring] - image: twinproduction/gatus:latest + image: docker.io/twinproduction/gatus:latest restart: unless-stopped ports: - "8080:8080" @@ -111,7 +111,7 @@ services: # On rootless Podman some host mounts may be unavailable; comment out if so. 
netdata: profiles: [monitoring] - image: netdata/netdata:stable + image: docker.io/netdata/netdata:stable restart: unless-stopped ports: - "19999:19999" From a873049e96a22cd300ee73e6ed9b517662b22feb Mon Sep 17 00:00:00 2001 From: Claude Date: Tue, 24 Mar 2026 16:23:02 +0000 Subject: [PATCH 04/41] fix: install gcc and configure native x86_64 linker in build image rust:slim-bookworm doesn't include gcc, and aes-gcm's build deps (via cc-rs) need a C compiler. With --target x86_64-unknown-linux-gnu set explicitly, cc-rs looks for the cross-compiler 'x86_64-linux-gnu-gcc' instead of native 'gcc'. Fix: install gcc in the build stage and add a [target.x86_64-*] linker entry pointing to 'gcc' so cc-rs finds it on native x86_64 builds. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/Dockerfile.server | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/infra/Dockerfile.server b/infra/Dockerfile.server index 236ab0b..4f71b73 100644 --- a/infra/Dockerfile.server +++ b/infra/Dockerfile.server @@ -8,8 +8,10 @@ ARG TARGETPLATFORM ARG TARGETARCH ARG TARGETVARIANT -# Install cross-compilation toolchains only when actually cross-compiling. -RUN apt-get update && apt-get install -y pkg-config && \ +# Install build tools. gcc is always needed: cc-rs (used by ring/aes-gcm deps) +# calls the native compiler even on native builds, and rust:slim doesn't +# include it. Cross-compilers are added only when actually cross-compiling. +RUN apt-get update && apt-get install -y gcc pkg-config && \ if [ "${BUILDPLATFORM}" != "${TARGETPLATFORM}" ]; then \ case "${TARGETARCH}:${TARGETVARIANT}" in \ "arm64:") apt-get install -y gcc-aarch64-linux-gnu ;; \ @@ -29,8 +31,13 @@ RUN case "${TARGETARCH}:${TARGETVARIANT}" in \ esac > /rust_target && \ rustup target add "$(cat /rust_target)" -# Tell Cargo which cross-linker to use (ignored on native builds). +# Tell Cargo which linker to use for each target. 
+# x86_64 native: use plain gcc (cc-rs would otherwise look for +# "x86_64-linux-gnu-gcc" which is the *cross* toolchain, not installed here). RUN mkdir -p /root/.cargo && printf '\ +[target.x86_64-unknown-linux-gnu]\n\ +linker = "gcc"\n\ +\n\ [target.aarch64-unknown-linux-gnu]\n\ linker = "aarch64-linux-gnu-gcc"\n\ \n\ From 0bd7b44b81d03bb1020901aaff15763cb6f059da Mon Sep 17 00:00:00 2001 From: Claude Date: Tue, 24 Mar 2026 16:25:48 +0000 Subject: [PATCH 05/41] fix: drop cross-compilation, build natively in Dockerfile MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit podman-compose does not populate BUILDPLATFORM/TARGETARCH build args, so the platform-detection logic always fell back to x86_64 — even on arm64. This caused cc-rs to look for 'x86_64-linux-gnu-gcc' instead of 'gcc'. Replace the entire cross-compile scaffolding with a plain native build: cargo build --release (no --target) Cargo targets the host platform automatically. If cross-compilation is ever needed it can be reintroduced with a properly-tested setup. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/Dockerfile.server | 64 +++++++---------------------------------- 1 file changed, 11 insertions(+), 53 deletions(-) diff --git a/infra/Dockerfile.server b/infra/Dockerfile.server index 4f71b73..d7c4e5d 100644 --- a/infra/Dockerfile.server +++ b/infra/Dockerfile.server @@ -1,70 +1,28 @@ # syntax=docker/dockerfile:1 # ── Build stage ─────────────────────────────────────────────────────────────── -# Run the compiler on the *build* host; cross-compile to target when needed. -FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.94-slim-bookworm AS builder +# Native build: Cargo targets the host platform automatically. +# No --target flag means no cross-compiler confusion regardless of which +# arch podman-compose runs on (x86_64, arm64, armv7…). 
+FROM docker.io/library/rust:1.94-slim-bookworm AS builder -ARG BUILDPLATFORM -ARG TARGETPLATFORM -ARG TARGETARCH -ARG TARGETVARIANT - -# Install build tools. gcc is always needed: cc-rs (used by ring/aes-gcm deps) -# calls the native compiler even on native builds, and rust:slim doesn't -# include it. Cross-compilers are added only when actually cross-compiling. +# gcc is required by cc-rs (used by aes-gcm / ring build scripts). +# rust:slim does not include a C compiler. RUN apt-get update && apt-get install -y gcc pkg-config && \ - if [ "${BUILDPLATFORM}" != "${TARGETPLATFORM}" ]; then \ - case "${TARGETARCH}:${TARGETVARIANT}" in \ - "arm64:") apt-get install -y gcc-aarch64-linux-gnu ;; \ - "arm:v7") apt-get install -y gcc-arm-linux-gnueabihf ;; \ - "arm:v6") apt-get install -y gcc-arm-linux-gnueabi ;; \ - esac; \ - fi && \ rm -rf /var/lib/apt/lists/* -# Map TARGETARCH + TARGETVARIANT → Rust target triple, then install it. -RUN case "${TARGETARCH}:${TARGETVARIANT}" in \ - "amd64:") echo x86_64-unknown-linux-gnu ;; \ - "arm64:") echo aarch64-unknown-linux-gnu ;; \ - "arm:v7") echo armv7-unknown-linux-gnueabihf ;; \ - "arm:v6") echo arm-unknown-linux-gnueabi ;; \ - *) echo x86_64-unknown-linux-gnu ;; \ - esac > /rust_target && \ - rustup target add "$(cat /rust_target)" - -# Tell Cargo which linker to use for each target. -# x86_64 native: use plain gcc (cc-rs would otherwise look for -# "x86_64-linux-gnu-gcc" which is the *cross* toolchain, not installed here). -RUN mkdir -p /root/.cargo && printf '\ -[target.x86_64-unknown-linux-gnu]\n\ -linker = "gcc"\n\ -\n\ -[target.aarch64-unknown-linux-gnu]\n\ -linker = "aarch64-linux-gnu-gcc"\n\ -\n\ -[target.armv7-unknown-linux-gnueabihf]\n\ -linker = "arm-linux-gnueabihf-gcc"\n\ -\n\ -[target.arm-unknown-linux-gnueabi]\n\ -linker = "arm-linux-gnueabi-gcc"\n' >> /root/.cargo/config.toml - WORKDIR /build -# Cache dependencies separately from source. +# Cache dependency compilation separately from application source. 
COPY Cargo.toml Cargo.lock* ./ COPY server/Cargo.toml ./server/ RUN mkdir -p server/src && echo 'fn main(){}' > server/src/main.rs -RUN TARGET=$(cat /rust_target) && \ - cargo build --release --target "$TARGET" -p hiy-server 2>/dev/null || true +RUN cargo build --release -p hiy-server 2>/dev/null || true RUN rm -f server/src/main.rs # Build actual source. COPY server/src ./server/src -RUN TARGET=$(cat /rust_target) && \ - touch server/src/main.rs && \ - cargo build --release --target "$TARGET" -p hiy-server - -# Normalise binary location so the runtime stage doesn't need to know the target. -RUN cp /build/target/"$(cat /rust_target)"/release/hiy-server /usr/local/bin/hiy-server +RUN touch server/src/main.rs && \ + cargo build --release -p hiy-server # ── Runtime stage ───────────────────────────────────────────────────────────── FROM docker.io/library/debian:bookworm-slim @@ -78,7 +36,7 @@ RUN apt-get update && apt-get install -y \ podman \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/bin/hiy-server /usr/local/bin/hiy-server +COPY --from=builder /build/target/release/hiy-server /usr/local/bin/hiy-server WORKDIR /app From 60f5df52f7e58e359e7ea57c212b21e96988bb76 Mon Sep 17 00:00:00 2001 From: Claude Date: Tue, 24 Mar 2026 16:29:15 +0000 Subject: [PATCH 06/41] fix: copy server/templates into build image for include_str! macros include_str!("../../templates/...") is resolved at compile time, so the template files must be present in the Docker build context. The previous Dockerfile only copied server/src, not server/templates. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/Dockerfile.server | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/infra/Dockerfile.server b/infra/Dockerfile.server index d7c4e5d..3b54f8a 100644 --- a/infra/Dockerfile.server +++ b/infra/Dockerfile.server @@ -20,7 +20,8 @@ RUN cargo build --release -p hiy-server 2>/dev/null || true RUN rm -f server/src/main.rs # Build actual source. 
-COPY server/src ./server/src +COPY server/src ./server/src +COPY server/templates ./server/templates RUN touch server/src/main.rs && \ cargo build --release -p hiy-server From 73ea7320fd06f1f7d3b6c38d2a250c19d7a0395d Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 25 Mar 2026 22:09:00 +0000 Subject: [PATCH 07/41] fix: use Caddy internal CA when ACME_EMAIL is not set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DOMAIN_SUFFIX=local (or any non-localhost LAN name) caused a TLS handshake failure because Caddy attempted an ACME challenge that can never succeed for private domains. - Caddyfile: tls {$ACME_EMAIL:internal} — falls back to Caddy's built-in CA when ACME_EMAIL is absent, uses Let's Encrypt when it is set. - start.sh: ACME_EMAIL is now optional; missing it prints a warning instead of aborting, so local/LAN setups work without an email address. To trust the self-signed cert in a browser run: caddy trust https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/start.sh | 39 ++------------------------------------- proxy/Caddyfile | 4 ++++ 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/infra/start.sh b/infra/start.sh index 1d453bc..a82ee60 100755 --- a/infra/start.sh +++ b/infra/start.sh @@ -20,45 +20,10 @@ if [ -z "$DOMAIN_SUFFIX" ] || [ "$DOMAIN_SUFFIX" = "localhost" ]; then fi if [ -z "$ACME_EMAIL" ]; then - echo "ERROR: Set ACME_EMAIL in infra/.env (required for Let's Encrypt)" - exit 1 + echo "[hiy] ACME_EMAIL not set — Caddy will use its internal CA (self-signed)." + echo "[hiy] For a public domain with Let's Encrypt, set ACME_EMAIL in infra/.env" fi -# ── Generate production caddy.json ───────────────────────────────────────────── -# Writes TLS-enabled config using Let's Encrypt (no Cloudflare required). -# Caddy will use the HTTP-01 challenge (port 80) or TLS-ALPN-01 (port 443). 
-cat > "$SCRIPT_DIR/../proxy/caddy.json" < Date: Thu, 26 Mar 2026 08:24:55 +0000 Subject: [PATCH 08/41] feat: private repo support via encrypted git token - db.rs: add nullable git_token column (idempotent ALTER TABLE ADD COLUMN) - models.rs: git_token on App (#[serde(skip_serializing)]), CreateApp, UpdateApp - routes/apps.rs: encrypt token on create/update; empty string clears it - builder.rs: decrypt token, pass as GIT_TOKEN env var to build script - build.sh: GIT_TERMINAL_PROMPT=0 (fail fast, not hang); when GIT_TOKEN is set, inject it into the HTTPS clone URL as x-token-auth; strip credentials from .git/config after clone/fetch so the token is never persisted to disk Token usage: PATCH /api/apps/:id with {"git_token": "ghp_..."} Clear token: PATCH /api/apps/:id with {"git_token": ""} https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- builder/build.sh | 21 ++++++++++++++++++++- server/src/builder.rs | 17 ++++++++++++++--- server/src/db.rs | 2 ++ server/src/models.rs | 5 +++++ server/src/routes/apps.rs | 26 ++++++++++++++++++++++++-- 5 files changed, 65 insertions(+), 6 deletions(-) diff --git a/builder/build.sh b/builder/build.sh index a779d11..bc5e79f 100755 --- a/builder/build.sh +++ b/builder/build.sh @@ -5,6 +5,9 @@ # MEMORY_LIMIT (e.g. "512m"), CPU_LIMIT (e.g. "0.5") set -euo pipefail +# Never prompt for git credentials — fail immediately if auth is missing. +export GIT_TERMINAL_PROMPT=0 + # Defaults — overridden by per-app settings stored in the control plane. MEMORY_LIMIT="${MEMORY_LIMIT:-512m}" CPU_LIMIT="${CPU_LIMIT:-0.5}" @@ -18,17 +21,33 @@ log "Branch: $BRANCH" log "Build dir: $BUILD_DIR" # ── 1. Clone or pull ─────────────────────────────────────────────────────────── +# Build an authenticated URL when a git token is set (private repos). +# GIT_TOKEN is passed by hiy-server and never echoed here. 
+CLONE_URL="$REPO_URL" +if [ -n "${GIT_TOKEN:-}" ]; then + case "$REPO_URL" in + https://*) + CLONE_URL="https://x-token-auth:${GIT_TOKEN}@${REPO_URL#https://}" + ;; + esac +fi + mkdir -p "$BUILD_DIR" cd "$BUILD_DIR" if [ -d ".git" ]; then log "Updating existing clone…" + git remote set-url origin "$CLONE_URL" git fetch origin "$BRANCH" --depth=50 git checkout "$BRANCH" git reset --hard "origin/$BRANCH" + # Strip credentials from the stored remote so they don't sit in .git/config. + git remote set-url origin "$REPO_URL" else log "Cloning repository…" - git clone --depth=50 --branch "$BRANCH" "$REPO_URL" . + git clone --depth=50 --branch "$BRANCH" "$CLONE_URL" . + # Strip credentials from the stored remote so they don't sit in .git/config. + git remote set-url origin "$REPO_URL" fi ACTUAL_SHA=$(git rev-parse HEAD) diff --git a/server/src/builder.rs b/server/src/builder.rs index ea1c25c..32a95a6 100644 --- a/server/src/builder.rs +++ b/server/src/builder.rs @@ -133,11 +133,22 @@ async fn run_build(state: &AppState, deploy_id: &str) -> anyhow::Result<()> { let domain_suffix = std::env::var("DOMAIN_SUFFIX").unwrap_or_else(|_| "localhost".into()); let caddy_api_url = std::env::var("CADDY_API_URL").unwrap_or_else(|_| "http://localhost:2019".into()); - let mut child = Command::new("bash") - .arg(&build_script) + let mut cmd = Command::new("bash"); + cmd.arg(&build_script) .env("APP_ID", &app.id) .env("APP_NAME", &app.name) - .env("REPO_URL", &repo_url) + .env("REPO_URL", &repo_url); + + // Decrypt the git token (if any) and pass it separately so build.sh can + // inject it into the clone URL without it appearing in REPO_URL or logs. 
+ if let Some(enc) = &app.git_token { + match crate::crypto::decrypt(enc) { + Ok(tok) => { cmd.env("GIT_TOKEN", tok); } + Err(e) => tracing::warn!("Could not decrypt git_token for {}: {}", app.id, e), + } + } + + let mut child = cmd .env("BRANCH", &app.branch) .env("PORT", app.port.to_string()) .env("ENV_FILE", &env_file) diff --git a/server/src/db.rs b/server/src/db.rs index 7d4ce46..0366a6a 100644 --- a/server/src/db.rs +++ b/server/src/db.rs @@ -106,6 +106,8 @@ pub async fn migrate(pool: &DbPool) -> anyhow::Result<()> { .execute(pool).await; let _ = sqlx::query("ALTER TABLE apps ADD COLUMN cpu_limit TEXT NOT NULL DEFAULT '0.5'") .execute(pool).await; + let _ = sqlx::query("ALTER TABLE apps ADD COLUMN git_token TEXT") + .execute(pool).await; sqlx::query( r#"CREATE TABLE IF NOT EXISTS databases ( diff --git a/server/src/models.rs b/server/src/models.rs index 39b9efe..c6de907 100644 --- a/server/src/models.rs +++ b/server/src/models.rs @@ -12,6 +12,9 @@ pub struct App { pub cpu_limit: String, pub created_at: String, pub updated_at: String, + /// Encrypted git token for cloning private repos. Never serialised to API responses. 
+ #[serde(skip_serializing)] + pub git_token: Option, } #[derive(Debug, Deserialize)] @@ -22,6 +25,7 @@ pub struct CreateApp { pub port: i64, pub memory_limit: Option, pub cpu_limit: Option, + pub git_token: Option, } #[derive(Debug, Deserialize)] @@ -31,6 +35,7 @@ pub struct UpdateApp { pub port: Option, pub memory_limit: Option, pub cpu_limit: Option, + pub git_token: Option, } #[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] diff --git a/server/src/routes/apps.rs b/server/src/routes/apps.rs index 3c161ca..f354498 100644 --- a/server/src/routes/apps.rs +++ b/server/src/routes/apps.rs @@ -31,10 +31,16 @@ pub async fn create( let secret = Uuid::new_v4().to_string().replace('-', ""); let memory_limit = payload.memory_limit.unwrap_or_else(|| "512m".into()); let cpu_limit = payload.cpu_limit.unwrap_or_else(|| "0.5".into()); + let git_token_enc = payload.git_token + .as_deref() + .filter(|t| !t.is_empty()) + .map(crate::crypto::encrypt) + .transpose() + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; sqlx::query( - "INSERT INTO apps (id, name, repo_url, branch, port, webhook_secret, memory_limit, cpu_limit, created_at, updated_at) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + "INSERT INTO apps (id, name, repo_url, branch, port, webhook_secret, memory_limit, cpu_limit, git_token, created_at, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", ) .bind(&id) .bind(&payload.name) @@ -44,6 +50,7 @@ pub async fn create( .bind(&secret) .bind(&memory_limit) .bind(&cpu_limit) + .bind(&git_token_enc) .bind(&now) .bind(&now) .execute(&s.db) @@ -105,6 +112,21 @@ pub async fn update( .execute(&s.db).await .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; } + if let Some(v) = payload.git_token { + if v.is_empty() { + sqlx::query("UPDATE apps SET git_token = NULL, updated_at = ? 
WHERE id = ?") + .bind(&now).bind(&id) + .execute(&s.db).await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + } else { + let enc = crate::crypto::encrypt(&v) + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + sqlx::query("UPDATE apps SET git_token = ?, updated_at = ? WHERE id = ?") + .bind(enc).bind(&now).bind(&id) + .execute(&s.db).await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + } + } fetch_app(&s, &id).await.map(Json) } From 4fb8c6b2c7ba6df45492252221edd9ab7d7610cd Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 08:32:58 +0000 Subject: [PATCH 09/41] feat: git token management in app detail UI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a 'Git Authentication' card to the app detail page with: - Status badge (Token configured / No token) - Password input to set/update the token - Clear button (only shown when a token is stored) Token is saved/cleared via PATCH /api/apps/:id — no new endpoints needed. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- server/src/routes/ui.rs | 36 +++++++++++++++++++--------- server/templates/app_detail.html | 41 ++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/server/src/routes/ui.rs b/server/src/routes/ui.rs index 9e51a6f..cf0f844 100644 --- a/server/src/routes/ui.rs +++ b/server/src/routes/ui.rs @@ -297,18 +297,32 @@ pub async fn app_detail( } }; + let (git_token_status, git_token_clear_btn) = if app.git_token.is_some() { + ( + r#"Token configured"#.to_string(), + r#""#.to_string(), + ) + } else { + ( + r#"No token — public repos only"#.to_string(), + String::new(), + ) + }; + let body = APP_DETAIL_TMPL - .replace("{{name}}", &app.name) - .replace("{{repo}}", &app.repo_url) - .replace("{{branch}}", &app.branch) - .replace("{{port}}", &app.port.to_string()) - .replace("{{host}}", &host) - .replace("{{app_id}}", &app.id) - .replace("{{secret}}", &app.webhook_secret) - 
.replace("{{deploy_rows}}", &deploy_rows) - .replace("{{env_rows}}", &env_rows) - .replace("{{c_badge}}", &container_badge(&container_state)) - .replace("{{db_card}}", &db_card); + .replace("{{name}}", &app.name) + .replace("{{repo}}", &app.repo_url) + .replace("{{branch}}", &app.branch) + .replace("{{port}}", &app.port.to_string()) + .replace("{{host}}", &host) + .replace("{{app_id}}", &app.id) + .replace("{{secret}}", &app.webhook_secret) + .replace("{{deploy_rows}}", &deploy_rows) + .replace("{{env_rows}}", &env_rows) + .replace("{{c_badge}}", &container_badge(&container_state)) + .replace("{{db_card}}", &db_card) + .replace("{{git_token_status}}", &git_token_status) + .replace("{{git_token_clear_btn}}", &git_token_clear_btn); Html(page(&app.name, &body)).into_response() } diff --git a/server/templates/app_detail.html b/server/templates/app_detail.html index f071f89..af06614 100644 --- a/server/templates/app_detail.html +++ b/server/templates/app_detail.html @@ -35,6 +35,26 @@ {{db_card}} +
+

Git Authentication

+

+ Required for private repos. Store a Personal Access Token (GitHub: repo scope, + GitLab: read_repository) so deploys can clone without interactive prompts. + Only HTTPS repo URLs are supported; SSH URLs use the server's own key pair. +

+

{{git_token_status}}

+
+
+ + +
+
+ + {{git_token_clear_btn}} +
+
+
+

Environment Variables

@@ -145,6 +165,27 @@ async function deprovisionDb() { if (r.ok) window.location.reload(); else alert('Error: ' + await r.text()); } +async function saveGitToken() { + const tok = document.getElementById('git-token-input').value; + if (!tok) { alert('Enter a token first'); return; } + const r = await fetch('/api/apps/' + APP_ID, { + method: 'PATCH', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({git_token: tok}), + }); + if (r.ok) window.location.reload(); + else alert('Error saving token: ' + await r.text()); +} +async function clearGitToken() { + if (!confirm('Remove the stored git token for ' + APP_ID + '?')) return; + const r = await fetch('/api/apps/' + APP_ID, { + method: 'PATCH', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({git_token: ''}), + }); + if (r.ok) window.location.reload(); + else alert('Error clearing token: ' + await r.text()); +} async function stopApp() { if (!confirm('Stop ' + APP_ID + '?')) return; const r = await fetch('/api/apps/' + APP_ID + '/stop', {method:'POST'}); From def40aa7f97cfc24200a11d27e7dcf12aca44374 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 08:38:22 +0000 Subject: [PATCH 10/41] fix: register PATCH on /api/apps/:id (JS was sending PATCH, route only had PUT) https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- server/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main.rs b/server/src/main.rs index ad3eee9..444a151 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -164,6 +164,7 @@ async fn main() -> anyhow::Result<()> { .route("/api/apps", get(routes::apps::list).post(routes::apps::create)) .route("/api/apps/:id", get(routes::apps::get_one) .put(routes::apps::update) + .patch(routes::apps::update) .delete(routes::apps::delete)) .route("/api/apps/:id/stop", post(routes::apps::stop)) .route("/api/apps/:id/restart", post(routes::apps::restart)) From c7ed5cfe95f7299bb8812b68254ad5ff77162efe Mon Sep 17 00:00:00 2001 From: 
Claude Date: Thu, 26 Mar 2026 08:41:54 +0000 Subject: [PATCH 11/41] fix: use x-access-token username for HTTPS git auth (GitHub compatibility) x-token-auth is Bitbucket/Gitea-specific; GitHub doesn't recognise it and returns a misleading 403 'Write access not granted'. x-access-token is the username GitHub documents for PAT auth and is also accepted by GitLab/Gitea. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- builder/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/build.sh b/builder/build.sh index bc5e79f..06570f5 100755 --- a/builder/build.sh +++ b/builder/build.sh @@ -27,7 +27,7 @@ CLONE_URL="$REPO_URL" if [ -n "${GIT_TOKEN:-}" ]; then case "$REPO_URL" in https://*) - CLONE_URL="https://x-token-auth:${GIT_TOKEN}@${REPO_URL#https://}" + CLONE_URL="https://x-access-token:${GIT_TOKEN}@${REPO_URL#https://}" ;; esac fi From eb9a500987dcc8a9933eb3f33585d07c4b1ffa34 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 08:55:58 +0000 Subject: [PATCH 12/41] feat: per-app public/private visibility toggle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Apps default to private (require login). Marking an app public bypasses the forward_auth check so anyone can access it without logging in. 
Changes: - db.rs: is_public INTEGER NOT NULL DEFAULT 0 column (idempotent) - models.rs: is_public: i64 on App; is_public: Option on UpdateApp - Cargo.toml: add reqwest for Caddy admin API calls from Rust - routes/apps.rs: PATCH is_public → save flag + immediately push updated Caddy route (no redeploy needed); caddy_route() builds correct JSON for both public (plain reverse_proxy) and private (forward_auth) cases - builder.rs: pass IS_PUBLIC env var to build.sh - build.sh: use IS_PUBLIC to select route type on deploy - ui.rs + app_detail.html: private/public badge + toggle button in subtitle https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- builder/build.sh | 20 +++++-- server/Cargo.toml | 1 + server/src/builder.rs | 1 + server/src/db.rs | 2 + server/src/models.rs | 2 + server/src/routes/apps.rs | 96 ++++++++++++++++++++++++++++++++ server/src/routes/ui.rs | 15 ++++- server/templates/app_detail.html | 12 ++++ 8 files changed, 142 insertions(+), 7 deletions(-) diff --git a/builder/build.sh b/builder/build.sh index 06570f5..b3696e1 100755 --- a/builder/build.sh +++ b/builder/build.sh @@ -149,11 +149,20 @@ if curl --silent --fail "${CADDY_API}/config/" >/dev/null 2>&1; then else ROUTES_URL="${CADDY_API}/config/apps/http/servers/${CADDY_SERVER}/routes" - # Route JSON uses Caddy's forward_auth pattern: - # 1. HIY server checks the session cookie and app-level permission at /auth/verify - # 2. On 2xx → Caddy proxies to the app container - # 3. On anything else (e.g. 302 redirect to /login) → Caddy passes through to the client - ROUTE_JSON=$(python3 -c " + # Route JSON: public apps use plain reverse_proxy; private apps use forward_auth. 
+ if [ "${IS_PUBLIC:-0}" = "1" ]; then + ROUTE_JSON=$(python3 -c " +import json, sys +upstream = sys.argv[1] +app_host = sys.argv[2] +route = { + 'match': [{'host': [app_host]}], + 'handle': [{'handler': 'reverse_proxy', 'upstreams': [{'dial': upstream}]}] +} +print(json.dumps(route)) +" "${UPSTREAM}" "${APP_ID}.${DOMAIN_SUFFIX}") + else + ROUTE_JSON=$(python3 -c " import json, sys upstream = sys.argv[1] app_host = sys.argv[2] @@ -187,6 +196,7 @@ route = { } print(json.dumps(route)) " "${UPSTREAM}" "${APP_ID}.${DOMAIN_SUFFIX}") + fi # Upsert the route for this app. ROUTES=$(curl --silent --fail "${ROUTES_URL}" 2>/dev/null || echo "[]") # Remove existing route for the same host, rebuild list, keep dashboard as catch-all. diff --git a/server/Cargo.toml b/server/Cargo.toml index cc8d19f..4e76770 100644 --- a/server/Cargo.toml +++ b/server/Cargo.toml @@ -28,3 +28,4 @@ aes-gcm = "0.10" anyhow = "1" futures = "0.3" base64 = "0.22" +reqwest = { version = "0.12", features = ["json", "rustls-tls"], default-features = false } diff --git a/server/src/builder.rs b/server/src/builder.rs index 32a95a6..b194054 100644 --- a/server/src/builder.rs +++ b/server/src/builder.rs @@ -156,6 +156,7 @@ async fn run_build(state: &AppState, deploy_id: &str) -> anyhow::Result<()> { .env("BUILD_DIR", &build_dir) .env("MEMORY_LIMIT", &app.memory_limit) .env("CPU_LIMIT", &app.cpu_limit) + .env("IS_PUBLIC", if app.is_public != 0 { "1" } else { "0" }) .env("DOMAIN_SUFFIX", &domain_suffix) .env("CADDY_API_URL", &caddy_api_url) .stdout(std::process::Stdio::piped()) diff --git a/server/src/db.rs b/server/src/db.rs index 0366a6a..fb34d02 100644 --- a/server/src/db.rs +++ b/server/src/db.rs @@ -108,6 +108,8 @@ pub async fn migrate(pool: &DbPool) -> anyhow::Result<()> { .execute(pool).await; let _ = sqlx::query("ALTER TABLE apps ADD COLUMN git_token TEXT") .execute(pool).await; + let _ = sqlx::query("ALTER TABLE apps ADD COLUMN is_public INTEGER NOT NULL DEFAULT 0") + .execute(pool).await; sqlx::query( 
r#"CREATE TABLE IF NOT EXISTS databases ( diff --git a/server/src/models.rs b/server/src/models.rs index c6de907..994f5a4 100644 --- a/server/src/models.rs +++ b/server/src/models.rs @@ -15,6 +15,7 @@ pub struct App { /// Encrypted git token for cloning private repos. Never serialised to API responses. #[serde(skip_serializing)] pub git_token: Option, + pub is_public: i64, } #[derive(Debug, Deserialize)] @@ -36,6 +37,7 @@ pub struct UpdateApp { pub memory_limit: Option, pub cpu_limit: Option, pub git_token: Option, + pub is_public: Option, } #[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] diff --git a/server/src/routes/apps.rs b/server/src/routes/apps.rs index f354498..cce0e68 100644 --- a/server/src/routes/apps.rs +++ b/server/src/routes/apps.rs @@ -12,6 +12,91 @@ use crate::{ AppState, }; +/// Build the Caddy route JSON for an app. +/// Public apps get a plain reverse_proxy; private apps get forward_auth via HIY. +fn caddy_route(app_host: &str, upstream: &str, is_public: bool) -> serde_json::Value { + if is_public { + serde_json::json!({ + "match": [{"host": [app_host]}], + "handle": [{"handler": "reverse_proxy", "upstreams": [{"dial": upstream}]}] + }) + } else { + serde_json::json!({ + "match": [{"host": [app_host]}], + "handle": [{ + "handler": "subroute", + "routes": [{ + "handle": [{ + "handler": "reverse_proxy", + "rewrite": {"method": "GET", "uri": "/auth/verify"}, + "headers": {"request": {"set": { + "X-Forwarded-Method": ["{http.request.method}"], + "X-Forwarded-Uri": ["{http.request.uri}"], + "X-Forwarded-Host": ["{http.request.host}"], + "X-Forwarded-Proto": ["{http.request.scheme}"] + }}}, + "upstreams": [{"dial": "server:3000"}], + "handle_response": [{ + "match": {"status_code": [2]}, + "routes": [{"handle": [{"handler": "reverse_proxy", "upstreams": [{"dial": upstream}]}]}] + }] + }] + }] + }] + }) + } +} + +/// Push a visibility change to Caddy without requiring a full redeploy. 
+/// Best-effort: logs a warning on failure but does not surface an error to the caller. +async fn push_visibility_to_caddy(app_id: &str, port: i64, is_public: bool) { + if let Err(e) = try_push_visibility_to_caddy(app_id, port, is_public).await { + tracing::warn!("caddy visibility update for {}: {}", app_id, e); + } +} + +async fn try_push_visibility_to_caddy(app_id: &str, port: i64, is_public: bool) -> anyhow::Result<()> { + let caddy_api = std::env::var("CADDY_API_URL").unwrap_or_else(|_| "http://caddy:2019".into()); + let domain = std::env::var("DOMAIN_SUFFIX").unwrap_or_else(|_| "localhost".into()); + let app_host = format!("{}.{}", app_id, domain); + let upstream = format!("hiy-{}:{}", app_id, port); + let client = reqwest::Client::new(); + + // Discover the Caddy server name (Caddyfile adapter names it "srv0"). + let servers: serde_json::Value = client + .get(format!("{}/config/apps/http/servers/", caddy_api)) + .send().await? + .json().await?; + let server_name = servers.as_object() + .and_then(|m| m.keys().next().cloned()) + .ok_or_else(|| anyhow::anyhow!("no servers in Caddy config"))?; + + let routes_url = format!("{}/config/apps/http/servers/{}/routes", caddy_api, server_name); + + let routes: Vec = client.get(&routes_url).send().await?.json().await?; + + let dashboard = serde_json::json!({ + "handle": [{"handler": "reverse_proxy", "upstreams": [{"dial": "server:3000"}]}] + }); + + let mut updated: Vec = routes.into_iter() + .filter(|r| { + let is_this_app = r.pointer("/match/0/host") + .and_then(|h| h.as_array()) + .map(|hosts| hosts.iter().any(|h| h.as_str() == Some(app_host.as_str()))) + .unwrap_or(false); + let is_catchall = r.get("match").is_none(); + !is_this_app && !is_catchall + }) + .collect(); + + updated.insert(0, caddy_route(&app_host, &upstream, is_public)); + updated.push(dashboard); + + client.patch(&routes_url).json(&updated).send().await?; + Ok(()) +} + pub async fn list(State(s): State) -> Result>, StatusCode> { let apps = 
sqlx::query_as::<_, App>("SELECT * FROM apps ORDER BY created_at DESC") .fetch_all(&s.db) @@ -112,6 +197,17 @@ pub async fn update( .execute(&s.db).await .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; } + if let Some(v) = payload.is_public { + let flag: i64 = if v { 1 } else { 0 }; + sqlx::query("UPDATE apps SET is_public = ?, updated_at = ? WHERE id = ?") + .bind(flag).bind(&now).bind(&id) + .execute(&s.db).await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + // Immediately reconfigure the Caddy route so the change takes effect + // without a full redeploy. + let app = fetch_app(&s, &id).await?; + push_visibility_to_caddy(&id, app.port, v).await; + } if let Some(v) = payload.git_token { if v.is_empty() { sqlx::query("UPDATE apps SET git_token = NULL, updated_at = ? WHERE id = ?") diff --git a/server/src/routes/ui.rs b/server/src/routes/ui.rs index cf0f844..10e25a1 100644 --- a/server/src/routes/ui.rs +++ b/server/src/routes/ui.rs @@ -297,6 +297,14 @@ pub async fn app_detail( } }; + let is_public = app.is_public != 0; + let visibility_badge = if is_public { + r#"public"# + } else { + r#"private"# + }; + let visibility_toggle_label = if is_public { "Make private" } else { "Make public" }; + let (git_token_status, git_token_clear_btn) = if app.git_token.is_some() { ( r#"Token configured"#.to_string(), @@ -321,8 +329,11 @@ pub async fn app_detail( .replace("{{env_rows}}", &env_rows) .replace("{{c_badge}}", &container_badge(&container_state)) .replace("{{db_card}}", &db_card) - .replace("{{git_token_status}}", &git_token_status) - .replace("{{git_token_clear_btn}}", &git_token_clear_btn); + .replace("{{git_token_status}}", &git_token_status) + .replace("{{git_token_clear_btn}}", &git_token_clear_btn) + .replace("{{visibility_badge}}", visibility_badge) + .replace("{{visibility_toggle_label}}", visibility_toggle_label) + .replace("{{is_public_js}}", if is_public { "true" } else { "false" }); Html(page(&app.name, &body)).into_response() } diff --git 
a/server/templates/app_detail.html b/server/templates/app_detail.html index af06614..c4e2380 100644 --- a/server/templates/app_detail.html +++ b/server/templates/app_detail.html @@ -14,6 +14,8 @@  ·  branch {{branch}}  ·  port {{port}}  ·  {{name}}.{{host}} +  ·  {{visibility_badge}} +

@@ -165,6 +167,16 @@ async function deprovisionDb() { if (r.ok) window.location.reload(); else alert('Error: ' + await r.text()); } +const IS_PUBLIC = {{is_public_js}}; +async function toggleVisibility() { + const r = await fetch('/api/apps/' + APP_ID, { + method: 'PATCH', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({is_public: !IS_PUBLIC}), + }); + if (r.ok) window.location.reload(); + else alert('Error updating visibility: ' + await r.text()); +} async function saveGitToken() { const tok = document.getElementById('git-token-input').value; if (!tok) { alert('Enter a token first'); return; } From 54ceedbe5af108b0417431239f3a67ecc7484e7e Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 09:45:52 +0000 Subject: [PATCH 13/41] feat: add Settings card to app detail page (repo, branch, port, memory, cpu) https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- server/src/routes/ui.rs | 2 ++ server/templates/app_detail.html | 30 ++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/server/src/routes/ui.rs b/server/src/routes/ui.rs index 10e25a1..fb1bb23 100644 --- a/server/src/routes/ui.rs +++ b/server/src/routes/ui.rs @@ -325,6 +325,8 @@ pub async fn app_detail( .replace("{{host}}", &host) .replace("{{app_id}}", &app.id) .replace("{{secret}}", &app.webhook_secret) + .replace("{{memory_limit}}", &app.memory_limit) + .replace("{{cpu_limit}}", &app.cpu_limit) .replace("{{deploy_rows}}", &deploy_rows) .replace("{{env_rows}}", &env_rows) .replace("{{c_badge}}", &container_badge(&container_state)) diff --git a/server/templates/app_detail.html b/server/templates/app_detail.html index c4e2380..3ba7b00 100644 --- a/server/templates/app_detail.html +++ b/server/templates/app_detail.html @@ -35,6 +35,20 @@
+
+

Settings

+
+
+
+
+
+
+
+
+
+ +
+ {{db_card}}
@@ -168,6 +182,22 @@ async function deprovisionDb() { else alert('Error: ' + await r.text()); } const IS_PUBLIC = {{is_public_js}}; +async function saveSettings() { + const body = { + repo_url: document.getElementById('cfg-repo').value, + branch: document.getElementById('cfg-branch').value, + port: parseInt(document.getElementById('cfg-port').value, 10), + memory_limit: document.getElementById('cfg-memory').value, + cpu_limit: document.getElementById('cfg-cpu').value, + }; + const r = await fetch('/api/apps/' + APP_ID, { + method: 'PATCH', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify(body), + }); + if (r.ok) window.location.reload(); + else alert('Error saving settings: ' + await r.text()); +} async function toggleVisibility() { const r = await fetch('/api/apps/' + APP_ID, { method: 'PATCH', From b6e223291a204a96cea1e8263f0adad847eecb98 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 10:44:19 +0000 Subject: [PATCH 14/41] feat: add Forgejo service + Postgres database provisioning - docker-compose.yml: Forgejo service on hiy-net, configured via env vars - postgres-init/01-forgejo.sql: creates forgejo user + database on first Postgres init - .env.example: document FORGEJO_DB_PASSWORD and FORGEJO_DOMAIN Routing: add FORGEJO_DOMAIN as an app in HIY pointing to forgejo:3000, or add a Caddyfile block manually. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/.env.example | 4 ++++ infra/docker-compose.yml | 25 +++++++++++++++++++++++++ infra/postgres-init/01-forgejo.sql | 5 +++++ 3 files changed, 34 insertions(+) create mode 100644 infra/postgres-init/01-forgejo.sql diff --git a/infra/.env.example b/infra/.env.example index 2001127..73eaa98 100644 --- a/infra/.env.example +++ b/infra/.env.example @@ -11,3 +11,7 @@ HIY_ADMIN_PASS=changeme # Postgres admin password — used by the shared cluster. # App schemas get their own scoped users; this password never leaves the server. 
POSTGRES_PASSWORD=changeme + +# Forgejo (optional — only needed if you add the forgejo service to docker-compose.yml). +FORGEJO_DB_PASSWORD=changeme +FORGEJO_DOMAIN=git.yourdomain.com diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 33a0e52..17024e8 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -70,6 +70,30 @@ services: POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} volumes: - hiy-pg-data:/var/lib/postgresql/data + # SQL files here run once on first init (ignored if data volume already exists). + - ./postgres-init:/docker-entrypoint-initdb.d:ro + networks: + - hiy-net + + # ── Forgejo (self-hosted Git) ────────────────────────────────────────────── + forgejo: + image: docker.io/codeberg.org/forgejo/forgejo:10 + restart: unless-stopped + environment: + USER_UID: 1000 + USER_GID: 1000 + FORGEJO__database__DB_TYPE: postgres + FORGEJO__database__HOST: postgres:5432 + FORGEJO__database__NAME: forgejo + FORGEJO__database__USER: forgejo + FORGEJO__database__PASSWD: ${FORGEJO_DB_PASSWORD} + FORGEJO__server__DOMAIN: ${FORGEJO_DOMAIN} + FORGEJO__server__ROOT_URL: https://${FORGEJO_DOMAIN}/ + FORGEJO__server__SSH_DOMAIN: ${FORGEJO_DOMAIN} + volumes: + - forgejo-data:/data + depends_on: + - postgres networks: - hiy-net @@ -142,6 +166,7 @@ networks: volumes: hiy-data: + forgejo-data: caddy-data: caddy-config: hiy-pg-data: diff --git a/infra/postgres-init/01-forgejo.sql b/infra/postgres-init/01-forgejo.sql new file mode 100644 index 0000000..1031b90 --- /dev/null +++ b/infra/postgres-init/01-forgejo.sql @@ -0,0 +1,5 @@ +-- Create a dedicated database and user for Forgejo. +-- This script runs once when the Postgres container is first initialised. +-- If the container already has data it is skipped automatically. 
+CREATE USER forgejo WITH PASSWORD 'CHANGE_ME'; +CREATE DATABASE forgejo OWNER forgejo; From 06a8cc189a0542dfa7c5e8befd347609f33bd4ff Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 10:46:09 +0000 Subject: [PATCH 15/41] fix: remove docker.io/ prefix from Forgejo image (Codeberg registry) https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 17024e8..8d69b1b 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -77,7 +77,7 @@ services: # ── Forgejo (self-hosted Git) ────────────────────────────────────────────── forgejo: - image: docker.io/codeberg.org/forgejo/forgejo:10 + image: codeberg.org/forgejo/forgejo:10 restart: unless-stopped environment: USER_UID: 1000 From 97929c11dea1fd3cf47e1df6da961711d8627949 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 10:49:19 +0000 Subject: [PATCH 16/41] fix: add static Caddyfile block for Forgejo (forgejo:3000, not hiy-forgejo) Forgejo is a docker-compose service, not a HIY-deployed container. HIY's dynamic routing uses the hiy-: naming convention which doesn't match. A static block pointing to forgejo:3000 is the correct approach. FORGEJO_DOMAIN falls back to forgejo.localhost so Caddy starts cleanly on installs that don't use Forgejo. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- proxy/Caddyfile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/proxy/Caddyfile b/proxy/Caddyfile index 07d9ac3..99e507a 100644 --- a/proxy/Caddyfile +++ b/proxy/Caddyfile @@ -31,6 +31,15 @@ reverse_proxy server:3000 } +# ── Static services (not managed by HIY) ────────────────────────────────────── + +# Set FORGEJO_DOMAIN in .env (e.g. git.yourdomain.com). Falls back to a +# non-routable placeholder so Caddy starts cleanly even if Forgejo isn't used. 
+{$FORGEJO_DOMAIN:forgejo.localhost} { + tls {$ACME_EMAIL:internal} + reverse_proxy forgejo:3000 +} + # Deployed apps are added here dynamically by hiy-server via the Caddy API. # Each entry looks like: # From 9ba81bd809d2567418e07c26e5f28a384f3f274c Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 10:56:04 +0000 Subject: [PATCH 17/41] fix: drop Caddy --resume, restore app routes from DB on startup --resume caused Caddyfile changes (e.g. new Forgejo block) to be silently ignored on restart because Caddy preferred its saved in-memory config. Instead, Caddy now always starts clean from the Caddyfile, and the HIY server re-registers every app's Caddy route from the DB on startup (restore_caddy_routes). This gives us the best of both worlds: - Caddyfile changes (static services, TLS config) are always picked up - App routes are restored automatically without needing a redeploy https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 2 +- server/src/main.rs | 8 ++++++++ server/src/routes/apps.rs | 29 +++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 8d69b1b..53c1a97 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -113,7 +113,7 @@ services: - ../proxy/Caddyfile:/etc/caddy/Caddyfile:ro - caddy-data:/data - caddy-config:/config - command: caddy run --config /etc/caddy/Caddyfile --adapter caddyfile --resume + command: caddy run --config /etc/caddy/Caddyfile --adapter caddyfile networks: - hiy-net - default diff --git a/server/src/main.rs b/server/src/main.rs index 444a151..152bfe8 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -154,6 +154,14 @@ async fn main() -> anyhow::Result<()> { builder::build_worker(worker_state).await; }); + // Re-register all app Caddy routes from the DB on startup. 
+ // Caddy no longer uses --resume, so routes must be restored each time the + // stack restarts (ensures Caddyfile changes are always picked up). + let restore_db = state.db.clone(); + tokio::spawn(async move { + routes::apps::restore_caddy_routes(&restore_db).await; + }); + // ── Protected routes (admin login required) ─────────────────────────────── let protected = Router::new() .route("/", get(routes::ui::index)) diff --git a/server/src/routes/apps.rs b/server/src/routes/apps.rs index cce0e68..58f8345 100644 --- a/server/src/routes/apps.rs +++ b/server/src/routes/apps.rs @@ -47,6 +47,35 @@ fn caddy_route(app_host: &str, upstream: &str, is_public: bool) -> serde_json::V } } +/// Re-register every app's Caddy route from the database. +/// Called at startup so that removing `--resume` from Caddy doesn't lose +/// routes when the stack restarts. +pub async fn restore_caddy_routes(db: &crate::DbPool) { + // Give Caddy a moment to finish loading the Caddyfile before we PATCH it. + let caddy_api = std::env::var("CADDY_API_URL").unwrap_or_else(|_| "http://caddy:2019".into()); + let client = reqwest::Client::new(); + for attempt in 1..=10u32 { + tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; + if client.get(format!("{}/config/", caddy_api)).send().await.is_ok() { + break; + } + tracing::info!("restore_caddy_routes: waiting for Caddy ({}/10)…", attempt); + } + + let apps = match sqlx::query_as::<_, crate::models::App>("SELECT * FROM apps") + .fetch_all(db) + .await + { + Ok(a) => a, + Err(e) => { tracing::error!("restore_caddy_routes: DB error: {}", e); return; } + }; + + for app in &apps { + push_visibility_to_caddy(&app.id, app.port, app.is_public != 0).await; + } + tracing::info!("restore_caddy_routes: registered {} app routes", apps.len()); +} + /// Push a visibility change to Caddy without requiring a full redeploy. /// Best-effort: logs a warning on failure but does not surface an error to the caller. 
async fn push_visibility_to_caddy(app_id: &str, port: i64, is_public: bool) { From 36b89d7620f795f98aacbdc5e06a72b5caee5810 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 11:11:53 +0000 Subject: [PATCH 18/41] fix: use FORGEJO_DB_PASSWORD env var in postgres init script Replaced hardcoded 'CHANGE_ME' in the SQL init file with a shell script that reads FORGEJO_DB_PASSWORD from the environment. Also pass the variable into the postgres service in docker-compose.yml so it is available at init time. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 1 + infra/postgres-init/01-forgejo.sh | 10 ++++++++++ infra/postgres-init/01-forgejo.sql | 5 ----- 3 files changed, 11 insertions(+), 5 deletions(-) create mode 100755 infra/postgres-init/01-forgejo.sh delete mode 100644 infra/postgres-init/01-forgejo.sql diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 53c1a97..d8f5299 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -68,6 +68,7 @@ services: POSTGRES_DB: hiy POSTGRES_USER: hiy_admin POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + FORGEJO_DB_PASSWORD: ${FORGEJO_DB_PASSWORD} volumes: - hiy-pg-data:/var/lib/postgresql/data # SQL files here run once on first init (ignored if data volume already exists). diff --git a/infra/postgres-init/01-forgejo.sh b/infra/postgres-init/01-forgejo.sh new file mode 100755 index 0000000..b401aa6 --- /dev/null +++ b/infra/postgres-init/01-forgejo.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# Create a dedicated database and user for Forgejo. +# Runs once when the Postgres container is first initialised. +# FORGEJO_DB_PASSWORD must be set in the environment (via docker-compose.yml). 
+set -euo pipefail + +psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL + CREATE USER forgejo WITH PASSWORD '${FORGEJO_DB_PASSWORD}'; + CREATE DATABASE forgejo OWNER forgejo; +EOSQL diff --git a/infra/postgres-init/01-forgejo.sql b/infra/postgres-init/01-forgejo.sql deleted file mode 100644 index 1031b90..0000000 --- a/infra/postgres-init/01-forgejo.sql +++ /dev/null @@ -1,5 +0,0 @@ --- Create a dedicated database and user for Forgejo. --- This script runs once when the Postgres container is first initialised. --- If the container already has data it is skipped automatically. -CREATE USER forgejo WITH PASSWORD 'CHANGE_ME'; -CREATE DATABASE forgejo OWNER forgejo; From ea172ae336d950c61320549c65e8a8c1ea2cc5e6 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 11:21:09 +0000 Subject: [PATCH 19/41] feat: lock Forgejo install wizard via env var Sets FORGEJO__security__INSTALL_LOCK=true so Forgejo skips the first-run wizard and uses the env var configuration directly. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index d8f5299..d81ce6f 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -91,6 +91,8 @@ services: FORGEJO__server__DOMAIN: ${FORGEJO_DOMAIN} FORGEJO__server__ROOT_URL: https://${FORGEJO_DOMAIN}/ FORGEJO__server__SSH_DOMAIN: ${FORGEJO_DOMAIN} + # Skip the first-run wizard — everything is configured via env vars above. + FORGEJO__security__INSTALL_LOCK: "true" volumes: - forgejo-data:/data depends_on: From 22a6ab103c3e06f55f755092aac4e94d9fb75edf Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 11:27:10 +0000 Subject: [PATCH 20/41] fix: wait for Postgres to be ready before starting Forgejo Adds a pg_isready healthcheck to the postgres service and upgrades the Forgejo depends_on to condition: service_healthy, preventing the "connection refused" crash on startup. 
https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index d81ce6f..9f60585 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -73,6 +73,11 @@ services: - hiy-pg-data:/var/lib/postgresql/data # SQL files here run once on first init (ignored if data volume already exists). - ./postgres-init:/docker-entrypoint-initdb.d:ro + healthcheck: + test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"] + interval: 5s + timeout: 5s + retries: 10 networks: - hiy-net @@ -96,7 +101,8 @@ services: volumes: - forgejo-data:/data depends_on: - - postgres + postgres: + condition: service_healthy networks: - hiy-net From bd863cdf33d12aaef27284dccecfe6efac889f68 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 11:33:57 +0000 Subject: [PATCH 21/41] fix: hardcode pg_isready args to avoid podman-compose $$ escaping issue https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 9f60585..16aa99a 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -74,7 +74,7 @@ services: # SQL files here run once on first init (ignored if data volume already exists). 
- ./postgres-init:/docker-entrypoint-initdb.d:ro healthcheck: - test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"] + test: ["CMD-SHELL", "pg_isready -U hiy_admin -d hiy"] interval: 5s timeout: 5s retries: 10 From de4b5c49ab453edd20128b7268e7267d6c0cfd68 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 11:41:27 +0000 Subject: [PATCH 22/41] =?UTF-8?q?fix:=20drop=20service=5Fhealthy=20depends?= =?UTF-8?q?=5Fon=20=E2=80=94=20podman-compose=20doesn't=20support=20it?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Forgejo restart: unless-stopped handles the retry loop until Postgres is ready. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 16aa99a..d81ce6f 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -73,11 +73,6 @@ services: - hiy-pg-data:/var/lib/postgresql/data # SQL files here run once on first init (ignored if data volume already exists). 
- ./postgres-init:/docker-entrypoint-initdb.d:ro - healthcheck: - test: ["CMD-SHELL", "pg_isready -U hiy_admin -d hiy"] - interval: 5s - timeout: 5s - retries: 10 networks: - hiy-net @@ -101,8 +96,7 @@ services: volumes: - forgejo-data:/data depends_on: - postgres: - condition: service_healthy + - postgres networks: - hiy-net From d3ef4d2030db2b75bceb1d120e775df64a7fad25 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 11:47:58 +0000 Subject: [PATCH 23/41] =?UTF-8?q?fix:=20use=20/bin/sh=20in=20postgres=20in?= =?UTF-8?q?it=20script=20=E2=80=94=20Alpine=20has=20no=20bash?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/postgres-init/01-forgejo.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/infra/postgres-init/01-forgejo.sh b/infra/postgres-init/01-forgejo.sh index b401aa6..7c7d7c3 100755 --- a/infra/postgres-init/01-forgejo.sh +++ b/infra/postgres-init/01-forgejo.sh @@ -1,8 +1,8 @@ -#!/usr/bin/env bash +#!/bin/sh # Create a dedicated database and user for Forgejo. # Runs once when the Postgres container is first initialised. # FORGEJO_DB_PASSWORD must be set in the environment (via docker-compose.yml). 
-set -euo pipefail +set -e psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL CREATE USER forgejo WITH PASSWORD '${FORGEJO_DB_PASSWORD}'; From e8d303f1848652b10372a7d99d59cf88a09c1d23 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 15:06:16 +0000 Subject: [PATCH 24/41] feat: extend backup script and add restore script MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit backup.sh now covers all data: - SQLite via podman exec into server container (fallback to host path) - Postgres via pg_dumpall inside postgres container - Forgejo data volume via podman volume export - Caddy TLS certificates via podman volume export - .env file (plaintext secrets — store archive securely) restore.sh reverses each step: imports volumes, restores Postgres, restores SQLite, optionally restores .env (--force to overwrite). Both scripts find containers dynamically via compose service labels so they work regardless of the container name podman-compose assigns. .env.example documents HIY_BACKUP_DIR, HIY_BACKUP_REMOTE, HIY_BACKUP_RETAIN_DAYS. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/.env.example | 9 +++ infra/backup.sh | 119 ++++++++++++++++++++++++++++--------- infra/restore.sh | 143 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 245 insertions(+), 26 deletions(-) create mode 100755 infra/restore.sh diff --git a/infra/.env.example b/infra/.env.example index 73eaa98..5625243 100644 --- a/infra/.env.example +++ b/infra/.env.example @@ -15,3 +15,12 @@ POSTGRES_PASSWORD=changeme # Forgejo (optional — only needed if you add the forgejo service to docker-compose.yml). FORGEJO_DB_PASSWORD=changeme FORGEJO_DOMAIN=git.yourdomain.com + +# ── Backup (infra/backup.sh) ────────────────────────────────────────────────── +# Local directory to store backup archives. +HIY_BACKUP_DIR=/mnt/usb/hiy-backups +# Optional rclone remote (e.g. "b2:mybucket/hiy", "s3:mybucket/hiy"). 
+# Requires rclone installed and configured. Leave blank to skip remote upload. +HIY_BACKUP_REMOTE= +# How many days to keep local archives (default 30). +HIY_BACKUP_RETAIN_DAYS=30 diff --git a/infra/backup.sh b/infra/backup.sh index 84e7f8e..a15e4c9 100755 --- a/infra/backup.sh +++ b/infra/backup.sh @@ -2,18 +2,22 @@ # HIY daily backup script # # What is backed up: -# 1. SQLite database (hiy.db) — apps, deploys, env vars, users -# 2. Env files directory — decrypted env files written per deploy -# 3. Git repos — bare repos for git-push deploys +# 1. SQLite database (hiy.db) — apps, deploys, env vars, users +# 2. Env files — per-deploy decrypted env files +# 3. Git repos — bare repos for git-push deploys +# 4. Postgres — pg_dumpall (hiy + forgejo databases) +# 5. Forgejo data volume — repositories, avatars, LFS objects +# 6. Caddy TLS certificates — caddy-data volume +# 7. .env file — secrets (handle the archive with care) # # Destination options (mutually exclusive; set one): # HIY_BACKUP_DIR — local path (e.g. /mnt/usb/hiy-backups, default /tmp/hiy-backups) # HIY_BACKUP_REMOTE — rclone remote:path (e.g. "b2:mybucket/hiy") # requires rclone installed and configured # -# Retention: 30 days (local only; remote retention is managed by the storage provider) +# Retention: 30 days local (remote retention managed by the storage provider). # -# Suggested cron (run as the same user as hiy-server): +# Suggested cron (run as the same user that owns the containers): # 0 3 * * * /path/to/infra/backup.sh >> /var/log/hiy-backup.log 2>&1 set -euo pipefail @@ -24,6 +28,11 @@ BACKUP_DIR="${HIY_BACKUP_DIR:-/tmp/hiy-backups}" BACKUP_REMOTE="${HIY_BACKUP_REMOTE:-}" RETAIN_DAYS="${HIY_BACKUP_RETAIN_DAYS:-30}" +# Load .env from the repo root (one level up from infra/) so the backup cron +# can find HIY_DATA_DIR, container names, etc. without extra shell setup. 
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ENV_FILE="${SCRIPT_DIR}/../.env" + TIMESTAMP=$(date +%Y%m%d-%H%M%S) ARCHIVE_NAME="hiy-backup-${TIMESTAMP}.tar.gz" STAGING="${BACKUP_DIR}/staging-${TIMESTAMP}" @@ -31,33 +40,91 @@ STAGING="${BACKUP_DIR}/staging-${TIMESTAMP}" log() { echo "[hiy-backup] $(date '+%H:%M:%S') $*"; } log "=== HIY Backup ===" -log "Data dir: ${HIY_DATA_DIR}" -log "Staging: ${STAGING}" +log "Data dir : ${HIY_DATA_DIR}" +log "Staging : ${STAGING}" -# ── 1. Stage files ───────────────────────────────────────────────────────────── mkdir -p "${STAGING}" -# SQLite: use the .dump command to produce a portable SQL text dump. -if [ -f "${HIY_DATA_DIR}/hiy.db" ]; then - log "Dumping SQLite database…" +# ── Helper: find a running container by compose service label ────────────────── +find_container() { + local service="$1" + podman ps --filter "label=com.docker.compose.service=${service}" \ + --format '{{.Names}}' | head -1 +} + +# ── 1. SQLite ────────────────────────────────────────────────────────────────── +log "--- SQLite ---" +SERVER_CTR=$(find_container server) +if [ -n "${SERVER_CTR}" ]; then + log "Dumping hiy.db via container ${SERVER_CTR}…" + podman exec "${SERVER_CTR}" sqlite3 "${HIY_DATA_DIR}/hiy.db" .dump \ + > "${STAGING}/hiy.sql" +elif [ -f "${HIY_DATA_DIR}/hiy.db" ]; then + log "Server container not running — dumping from host path…" sqlite3 "${HIY_DATA_DIR}/hiy.db" .dump > "${STAGING}/hiy.sql" else - log "WARNING: ${HIY_DATA_DIR}/hiy.db not found — skipping SQLite dump" + log "WARNING: hiy.db not found — skipping SQLite dump" fi -# Env files (contain decrypted secrets — handle with care). -if [ -d "${HIY_DATA_DIR}/envs" ]; then - log "Copying env files…" - cp -r "${HIY_DATA_DIR}/envs" "${STAGING}/envs" +# ── 2. 
Env files ─────────────────────────────────────────────────────────────── +log "--- Env files ---" +if [ -n "${SERVER_CTR}" ]; then + podman exec "${SERVER_CTR}" sh -c \ + "[ -d ${HIY_DATA_DIR}/envs ] && tar -C ${HIY_DATA_DIR} -czf - envs" \ + > "${STAGING}/envs.tar.gz" 2>/dev/null || true +elif [ -d "${HIY_DATA_DIR}/envs" ]; then + tar -czf "${STAGING}/envs.tar.gz" -C "${HIY_DATA_DIR}" envs fi -# Bare git repos. -if [ -d "${HIY_DATA_DIR}/repos" ]; then - log "Copying git repos…" - cp -r "${HIY_DATA_DIR}/repos" "${STAGING}/repos" +# ── 3. Git repos ─────────────────────────────────────────────────────────────── +log "--- Git repos ---" +if [ -n "${SERVER_CTR}" ]; then + podman exec "${SERVER_CTR}" sh -c \ + "[ -d ${HIY_DATA_DIR}/repos ] && tar -C ${HIY_DATA_DIR} -czf - repos" \ + > "${STAGING}/repos.tar.gz" 2>/dev/null || true +elif [ -d "${HIY_DATA_DIR}/repos" ]; then + tar -czf "${STAGING}/repos.tar.gz" -C "${HIY_DATA_DIR}" repos fi -# ── 2. Create archive ────────────────────────────────────────────────────────── +# ── 4. Postgres ──────────────────────────────────────────────────────────────── +log "--- Postgres ---" +PG_CTR=$(find_container postgres) +if [ -n "${PG_CTR}" ]; then + log "Running pg_dumpall via container ${PG_CTR}…" + podman exec "${PG_CTR}" pg_dumpall -U hiy_admin \ + > "${STAGING}/postgres.sql" +else + log "WARNING: postgres container not running — skipping Postgres dump" +fi + +# ── 5. Forgejo data volume ───────────────────────────────────────────────────── +log "--- Forgejo volume ---" +if podman volume exists forgejo-data 2>/dev/null; then + log "Exporting forgejo-data volume…" + podman volume export forgejo-data > "${STAGING}/forgejo-data.tar" +else + log "forgejo-data volume not found — skipping" +fi + +# ── 6. 
Caddy TLS certificates ────────────────────────────────────────────────── +log "--- Caddy volume ---" +if podman volume exists caddy-data 2>/dev/null; then + log "Exporting caddy-data volume…" + podman volume export caddy-data > "${STAGING}/caddy-data.tar" +else + log "caddy-data volume not found — skipping" +fi + +# ── 7. .env file ─────────────────────────────────────────────────────────────── +log "--- .env ---" +if [ -f "${ENV_FILE}" ]; then + cp "${ENV_FILE}" "${STAGING}/dot-env" + log "WARNING: archive contains plaintext secrets — store it securely" +else + log ".env not found at ${ENV_FILE} — skipping" +fi + +# ── Create archive ───────────────────────────────────────────────────────────── mkdir -p "${BACKUP_DIR}" ARCHIVE_PATH="${BACKUP_DIR}/${ARCHIVE_NAME}" log "Creating archive: ${ARCHIVE_PATH}" @@ -67,19 +134,19 @@ rm -rf "${STAGING}" ARCHIVE_SIZE=$(du -sh "${ARCHIVE_PATH}" | cut -f1) log "Archive size: ${ARCHIVE_SIZE}" -# ── 3. Upload to remote (optional) ──────────────────────────────────────────── +# ── Upload to remote (optional) ──────────────────────────────────────────────── if [ -n "${BACKUP_REMOTE}" ]; then if command -v rclone &>/dev/null; then - log "Uploading to remote: ${BACKUP_REMOTE}" + log "Uploading to ${BACKUP_REMOTE}…" rclone copy "${ARCHIVE_PATH}" "${BACKUP_REMOTE}/" log "Upload complete." else - log "WARNING: HIY_BACKUP_REMOTE is set but rclone is not installed — skipping upload" - log "Install rclone: https://rclone.org/install/" + log "WARNING: HIY_BACKUP_REMOTE is set but rclone is not installed — skipping" + log "Install: https://rclone.org/install/" fi fi -# ── 4. 
Rotate old local backups ──────────────────────────────────────────────── +# ── Rotate old local backups ─────────────────────────────────────────────────── log "Removing local backups older than ${RETAIN_DAYS} days…" find "${BACKUP_DIR}" -maxdepth 1 -name 'hiy-backup-*.tar.gz' \ -mtime "+${RETAIN_DAYS}" -delete diff --git a/infra/restore.sh b/infra/restore.sh new file mode 100755 index 0000000..4595a35 --- /dev/null +++ b/infra/restore.sh @@ -0,0 +1,143 @@ +#!/usr/bin/env bash +# HIY restore script +# +# Restores a backup archive produced by infra/backup.sh. +# +# Usage: +# ./infra/restore.sh /path/to/hiy-backup-20260101-030000.tar.gz +# +# What is restored: +# 1. SQLite database (hiy.db) +# 2. Env files and git repos +# 3. Postgres databases (pg_dumpall dump) +# 4. Forgejo data volume +# 5. Caddy TLS certificates +# 6. .env file (optional — skipped if already present unless --force is passed) +# +# ⚠ Run this with the stack STOPPED, then bring it back up afterwards: +# podman compose -f infra/docker-compose.yml down +# ./infra/restore.sh hiy-backup-*.tar.gz +# podman compose -f infra/docker-compose.yml up -d + +set -euo pipefail + +ARCHIVE="${1:-}" +FORCE="${2:-}" + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ENV_FILE="${SCRIPT_DIR}/../.env" +HIY_DATA_DIR="${HIY_DATA_DIR:-/data}" + +log() { echo "[hiy-restore] $(date '+%H:%M:%S') $*"; } +die() { log "ERROR: $*"; exit 1; } + +# ── Validate ─────────────────────────────────────────────────────────────────── +[ -z "${ARCHIVE}" ] && die "Usage: $0 [--force]" +[ -f "${ARCHIVE}" ] || die "Archive not found: ${ARCHIVE}" + +WORK_DIR=$(mktemp -d) +trap 'rm -rf "${WORK_DIR}"' EXIT + +log "=== HIY Restore ===" +log "Archive : ${ARCHIVE}" +log "Work dir: ${WORK_DIR}" + +log "Extracting archive…" +tar -xzf "${ARCHIVE}" -C "${WORK_DIR}" + +# ── Helper: find a running container by compose service label ────────────────── +find_container() { + local service="$1" + podman ps --filter 
"label=com.docker.compose.service=${service}" \ + --format '{{.Names}}' | head -1 +} + +# ── 1. .env file ─────────────────────────────────────────────────────────────── +log "--- .env ---" +if [ -f "${WORK_DIR}/dot-env" ]; then + if [ -f "${ENV_FILE}" ] && [ "${FORCE}" != "--force" ]; then + log "SKIP: ${ENV_FILE} already exists (pass --force to overwrite)" + else + cp "${WORK_DIR}/dot-env" "${ENV_FILE}" + log "Restored .env to ${ENV_FILE}" + fi +else + log "No .env in archive — skipping" +fi + +# ── 2. SQLite ────────────────────────────────────────────────────────────────── +log "--- SQLite ---" +if [ -f "${WORK_DIR}/hiy.sql" ]; then + DB_PATH="${HIY_DATA_DIR}/hiy.db" + mkdir -p "$(dirname "${DB_PATH}")" + if [ -f "${DB_PATH}" ]; then + log "Moving existing hiy.db to hiy.db.bak…" + mv "${DB_PATH}" "${DB_PATH}.bak" + fi + log "Restoring hiy.db…" + sqlite3 "${DB_PATH}" < "${WORK_DIR}/hiy.sql" + log "SQLite restored." +else + log "No hiy.sql in archive — skipping" +fi + +# ── 3. Env files & git repos ─────────────────────────────────────────────────── +log "--- Env files ---" +if [ -f "${WORK_DIR}/envs.tar.gz" ]; then + log "Restoring envs/…" + tar -xzf "${WORK_DIR}/envs.tar.gz" -C "${HIY_DATA_DIR}" +fi + +log "--- Git repos ---" +if [ -f "${WORK_DIR}/repos.tar.gz" ]; then + log "Restoring repos/…" + tar -xzf "${WORK_DIR}/repos.tar.gz" -C "${HIY_DATA_DIR}" +fi + +# ── 4. Postgres ──────────────────────────────────────────────────────────────── +log "--- Postgres ---" +if [ -f "${WORK_DIR}/postgres.sql" ]; then + PG_CTR=$(find_container postgres) + if [ -n "${PG_CTR}" ]; then + log "Restoring Postgres via container ${PG_CTR}…" + # Drop existing connections then restore. 
+ podman exec -i "${PG_CTR}" psql -U hiy_admin -d postgres \ + -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname IN ('hiy','forgejo') AND pid <> pg_backend_pid();" \ + > /dev/null 2>&1 || true + podman exec -i "${PG_CTR}" psql -U hiy_admin -d postgres \ + < "${WORK_DIR}/postgres.sql" + log "Postgres restored." + else + log "WARNING: postgres container not running" + log " Start Postgres first, then run:" + log " podman exec -i psql -U hiy_admin -d postgres < ${WORK_DIR}/postgres.sql" + fi +else + log "No postgres.sql in archive — skipping" +fi + +# ── 5. Forgejo data volume ───────────────────────────────────────────────────── +log "--- Forgejo volume ---" +if [ -f "${WORK_DIR}/forgejo-data.tar" ]; then + log "Importing forgejo-data volume…" + podman volume exists forgejo-data 2>/dev/null || podman volume create forgejo-data + podman volume import forgejo-data "${WORK_DIR}/forgejo-data.tar" + log "forgejo-data restored." +else + log "No forgejo-data.tar in archive — skipping" +fi + +# ── 6. Caddy TLS certificates ────────────────────────────────────────────────── +log "--- Caddy volume ---" +if [ -f "${WORK_DIR}/caddy-data.tar" ]; then + log "Importing caddy-data volume…" + podman volume exists caddy-data 2>/dev/null || podman volume create caddy-data + podman volume import caddy-data "${WORK_DIR}/caddy-data.tar" + log "caddy-data restored." 
+else + log "No caddy-data.tar in archive — skipping" +fi + +log "=== Restore complete ===" +log "Bring the stack back up with:" +log " podman compose -f ${SCRIPT_DIR}/docker-compose.yml up -d" From 84ac8f3b9fe865331facc26a3c59503670473542 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 15:32:26 +0000 Subject: [PATCH 25/41] =?UTF-8?q?fix:=20copy=20hiy.db=20out=20of=20contain?= =?UTF-8?q?er=20before=20dumping=20=E2=80=94=20server=20image=20has=20no?= =?UTF-8?q?=20sqlite3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/backup.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/infra/backup.sh b/infra/backup.sh index a15e4c9..feaa894 100755 --- a/infra/backup.sh +++ b/infra/backup.sh @@ -56,9 +56,11 @@ find_container() { log "--- SQLite ---" SERVER_CTR=$(find_container server) if [ -n "${SERVER_CTR}" ]; then - log "Dumping hiy.db via container ${SERVER_CTR}…" - podman exec "${SERVER_CTR}" sqlite3 "${HIY_DATA_DIR}/hiy.db" .dump \ - > "${STAGING}/hiy.sql" + log "Copying hiy.db from container ${SERVER_CTR}…" + podman cp "${SERVER_CTR}:${HIY_DATA_DIR}/hiy.db" "${STAGING}/hiy.db" + log "Dumping hiy.db…" + sqlite3 "${STAGING}/hiy.db" .dump > "${STAGING}/hiy.sql" + rm "${STAGING}/hiy.db" elif [ -f "${HIY_DATA_DIR}/hiy.db" ]; then log "Server container not running — dumping from host path…" sqlite3 "${HIY_DATA_DIR}/hiy.db" .dump > "${STAGING}/hiy.sql" From b7430cbb654d236769757e843d5d040b19e963e0 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 26 Mar 2026 15:44:46 +0000 Subject: [PATCH 26/41] =?UTF-8?q?fix:=20add=20--transfers=201=20--retries?= =?UTF-8?q?=205=20to=20rclone=20=E2=80=94=20workaround=20for=20Proton=20Dr?= =?UTF-8?q?ive=20parallel=20upload=20bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/backup.sh | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infra/backup.sh b/infra/backup.sh index feaa894..a2c601a 100755 --- a/infra/backup.sh +++ b/infra/backup.sh @@ -140,7 +140,7 @@ log "Archive size: ${ARCHIVE_SIZE}" if [ -n "${BACKUP_REMOTE}" ]; then if command -v rclone &>/dev/null; then log "Uploading to ${BACKUP_REMOTE}…" - rclone copy "${ARCHIVE_PATH}" "${BACKUP_REMOTE}/" + rclone copy --transfers 1 --retries 5 "${ARCHIVE_PATH}" "${BACKUP_REMOTE}/" log "Upload complete." else log "WARNING: HIY_BACKUP_REMOTE is set but rclone is not installed — skipping" From 0fb3a6bfe15b60df09e7734b17ea11512ec07f5c Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 10:23:38 +0000 Subject: [PATCH 27/41] fix: add PATH to systemd service so podman-compose is found at boot https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/start.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/infra/start.sh b/infra/start.sh index a82ee60..cdca443 100755 --- a/infra/start.sh +++ b/infra/start.sh @@ -171,6 +171,7 @@ Wants=network-online.target [Service] Type=oneshot RemainAfterExit=yes +Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/$(id -un)/.local/bin ExecStart=${SCRIPT_DIR}/boot.sh ExecStop=podman compose --env-file ${REPO_ROOT}/.env -f ${SCRIPT_DIR}/docker-compose.yml down From c7af43ab33669106fb7b4ffefa6881fca5cce346 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 10:29:52 +0000 Subject: [PATCH 28/41] feat: restart stopped app containers on server startup Adds restore_app_containers() which runs at startup alongside restore_caddy_routes(). For each app with a successful deploy it inspects the container state via `podman inspect` and runs `podman start` if the container is exited (e.g. after a host reboot). Missing containers are logged as warnings requiring a manual redeploy. 
https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- server/src/main.rs | 6 ++++ server/src/routes/apps.rs | 67 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/server/src/main.rs b/server/src/main.rs index 152bfe8..6bb8821 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -162,6 +162,12 @@ async fn main() -> anyhow::Result<()> { routes::apps::restore_caddy_routes(&restore_db).await; }); + // Restart any app containers that are stopped (e.g. after a host reboot). + let containers_db = state.db.clone(); + tokio::spawn(async move { + routes::apps::restore_app_containers(&containers_db).await; + }); + // ── Protected routes (admin login required) ─────────────────────────────── let protected = Router::new() .route("/", get(routes::ui::index)) diff --git a/server/src/routes/apps.rs b/server/src/routes/apps.rs index 58f8345..869ca4f 100644 --- a/server/src/routes/apps.rs +++ b/server/src/routes/apps.rs @@ -76,6 +76,73 @@ pub async fn restore_caddy_routes(db: &crate::DbPool) { tracing::info!("restore_caddy_routes: registered {} app routes", apps.len()); } +/// On startup, ensure every app that had a successful deploy is actually running. +/// If the host rebooted, containers will be in "exited" state — start them. +/// If a container is missing entirely, log a warning (we don't rebuild automatically). +pub async fn restore_app_containers(db: &crate::DbPool) { + let apps = match sqlx::query_as::<_, crate::models::App>("SELECT * FROM apps") + .fetch_all(db) + .await + { + Ok(a) => a, + Err(e) => { tracing::error!("restore_app_containers: DB error: {}", e); return; } + }; + + for app in &apps { + // Only care about apps that have at least one successful deploy. + let has_deploy: bool = sqlx::query_scalar( + "SELECT COUNT(*) > 0 FROM deploys WHERE app_id = ? 
AND status = 'success'" + ) + .bind(&app.id) + .fetch_one(db) + .await + .unwrap_or(false); + + if !has_deploy { + continue; + } + + let container = format!("hiy-{}", app.id); + + // Check container state via `podman inspect`. + let inspect = tokio::process::Command::new("podman") + .args(["inspect", "--format", "{{.State.Status}}", &container]) + .output() + .await; + + match inspect { + Ok(out) if out.status.success() => { + let status = String::from_utf8_lossy(&out.stdout).trim().to_string(); + if status == "running" { + tracing::debug!("restore_app_containers: {} already running", container); + } else { + tracing::info!("restore_app_containers: starting {} (was {})", container, status); + let start = tokio::process::Command::new("podman") + .args(["start", &container]) + .output() + .await; + match start { + Ok(o) if o.status.success() => + tracing::info!("restore_app_containers: {} started", container), + Ok(o) => + tracing::warn!("restore_app_containers: failed to start {}: {}", + container, String::from_utf8_lossy(&o.stderr).trim()), + Err(e) => + tracing::warn!("restore_app_containers: error starting {}: {}", container, e), + } + } + } + _ => { + tracing::warn!( + "restore_app_containers: container {} not found — redeploy needed", + container + ); + } + } + } + tracing::info!("restore_app_containers: done"); +} + /// Push a visibility change to Caddy without requiring a full redeploy. /// Best-effort: logs a warning on failure but does not surface an error to the caller. 
async fn push_visibility_to_caddy(app_id: &str, port: i64, is_public: bool) { From fa12b80638b28b323035303733d0ad1f1eaffe68 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 10:30:09 +0000 Subject: [PATCH 29/41] chore: update Cargo.lock https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- Cargo.lock | 366 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 349 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4776597..272c9c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -294,6 +294,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.44" @@ -376,7 +382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "typenum", ] @@ -625,8 +631,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] @@ -636,9 +644,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", "r-efi 5.3.0", "wasip2", + "wasm-bindgen", ] [[package]] @@ -734,12 +744,13 @@ dependencies = [ "futures", "hex", "hmac", + "reqwest", "serde", "serde_json", "sha2", "sqlx", "tokio", - "tower-http", + "tower-http 0.5.2", "tracing", "tracing-subscriber", "uuid", @@ -836,6 +847,24 @@ dependencies = [ "pin-utils", "smallvec", 
"tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls 0.23.37", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.6", ] [[package]] @@ -844,13 +873,21 @@ version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ + "base64 0.22.1", "bytes", + "futures-channel", + "futures-util", "http", "http-body", "hyper", + "ipnet", + "libc", + "percent-encoding", "pin-project-lite", + "socket2", "tokio", "tower-service", + "tracing", ] [[package]] @@ -1006,6 +1043,22 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ipnet" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" + +[[package]] +name = "iri-string" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e7418f59cc01c88316161279a7f665217ae316b388e58a0d10e29f54f1e5eb" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "itoa" version = "1.0.17" @@ -1099,6 +1152,12 @@ version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "matchers" version = "0.2.0" @@ -1183,7 +1242,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand", + "rand 0.8.5", "smallvec", "zeroize", ] @@ -1368,6 +1427,61 @@ dependencies = [ "unicode-ident", 
] +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.23.37", + "socket2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls 0.23.37", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "quote" version = "1.0.45" @@ -1396,8 +1510,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.5", ] [[package]] @@ -1407,7 +1531,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = 
"rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.5", ] [[package]] @@ -1419,6 +1553,15 @@ dependencies = [ "getrandom 0.2.17", ] +[[package]] +name = "rand_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + [[package]] name = "redox_syscall" version = "0.5.18" @@ -1454,6 +1597,44 @@ version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" +[[package]] +name = "reqwest" +version = "0.12.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls 0.23.37", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower", + "tower-http 0.6.8", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 1.0.6", +] + [[package]] name = "ring" version = "0.17.14" @@ -1481,13 +1662,19 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8", - "rand_core", + "rand_core 0.6.4", "signature", "spki", "subtle", "zeroize", ] +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + [[package]] name = "rustix" version = "1.1.4" @@ -1508,10 +1695,24 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "ring", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.23.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki 0.103.10", + "subtle", + "zeroize", +] + [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -1521,6 +1722,16 @@ dependencies = [ "base64 0.21.7", ] +[[package]] +name = "rustls-pki-types" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" +dependencies = [ + "web-time", + "zeroize", +] + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -1531,6 +1742,17 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.103.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "rustversion" version = "1.0.22" @@ -1685,7 +1907,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -1780,19 +2002,19 @@ dependencies = [ "once_cell", "paste", "percent-encoding", - "rustls", + "rustls 0.21.12", "rustls-pemfile", "serde", "serde_json", "sha2", "smallvec", "sqlformat", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", "url", - "webpki-roots", + "webpki-roots 0.25.4", ] [[package]] @@ -1864,7 +2086,7 @@ dependencies = [ "memchr", "once_cell", "percent-encoding", - "rand", + 
"rand 0.8.5", "rsa", "serde", "sha1", @@ -1872,7 +2094,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror", + "thiserror 1.0.69", "tracing", "whoami", ] @@ -1904,14 +2126,14 @@ dependencies = [ "md-5", "memchr", "once_cell", - "rand", + "rand 0.8.5", "serde", "serde_json", "sha2", "smallvec", "sqlx-core", "stringprep", - "thiserror", + "thiserror 1.0.69", "tracing", "whoami", ] @@ -1990,6 +2212,9 @@ name = "sync_wrapper" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] name = "synstructure" @@ -2021,7 +2246,16 @@ version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl 2.0.18", ] [[package]] @@ -2035,6 +2269,17 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + [[package]] name = "thread_local" version = "1.1.9" @@ -2097,6 +2342,16 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls 0.23.37", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.18" @@ -2141,6 +2396,24 @@ dependencies = [ "tracing", ] 
+[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -2215,6 +2488,12 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + [[package]] name = "typenum" version = "1.19.0" @@ -2291,6 +2570,7 @@ dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] [[package]] @@ -2334,6 +2614,15 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" @@ -2377,6 +2666,20 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" +dependencies = [ + "cfg-if", + "futures-util", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.114" @@ -2443,12 +2746,41 @@ dependencies = [ "semver", ] +[[package]] +name = "web-sys" +version = "0.3.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +[[package]] +name = "webpki-roots" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "whoami" version = "1.6.1" From 4ef77bf255fe756d5c5aa388230b074cf683c1ef Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 13:33:00 +0000 Subject: [PATCH 30/41] feat: add Forgejo Actions runner (act_runner) to docker-compose MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds the act-runner service alongside Forgejo. It connects to the Podman socket proxy so CI jobs can build and run containers on the Pi. Also enables FORGEJO__actions__ENABLED on the Forgejo service. FORGEJO_RUNNER_TOKEN must be set in .env — obtain it from: Forgejo → Site Administration → Actions → Runners → Create new runner https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/.env.example | 3 +++ infra/docker-compose.yml | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/infra/.env.example b/infra/.env.example index 5625243..65b7b70 100644 --- a/infra/.env.example +++ b/infra/.env.example @@ -15,6 +15,9 @@ POSTGRES_PASSWORD=changeme # Forgejo (optional — only needed if you add the forgejo service to docker-compose.yml). 
FORGEJO_DB_PASSWORD=changeme FORGEJO_DOMAIN=git.yourdomain.com +# Actions runner registration token — obtain from Forgejo: +# Site Administration → Actions → Runners → Create new runner +FORGEJO_RUNNER_TOKEN= # ── Backup (infra/backup.sh) ────────────────────────────────────────────────── # Local directory to store backup archives. diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index d81ce6f..9495449 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -93,6 +93,8 @@ services: FORGEJO__server__SSH_DOMAIN: ${FORGEJO_DOMAIN} # Skip the first-run wizard — everything is configured via env vars above. FORGEJO__security__INSTALL_LOCK: "true" + # Enable Actions. + FORGEJO__actions__ENABLED: "true" volumes: - forgejo-data:/data depends_on: @@ -100,6 +102,26 @@ services: networks: - hiy-net + # ── Forgejo Actions runner ───────────────────────────────────────────────── + # Obtain FORGEJO_RUNNER_TOKEN from Forgejo: + # Site Administration → Actions → Runners → Create new runner + act-runner: + image: code.forgejo.org/forgejo/act_runner:latest + restart: unless-stopped + environment: + FORGEJO_INSTANCE_URL: https://${FORGEJO_DOMAIN} + FORGEJO_RUNNER_REGISTRATION_TOKEN: ${FORGEJO_RUNNER_TOKEN} + FORGEJO_RUNNER_NAME: hiy-runner + # Give the runner access to Podman so CI jobs can build/run containers. 
+ DOCKER_HOST: tcp://podman-proxy:2375 + volumes: + - act-runner-data:/data + depends_on: + - forgejo + - podman-proxy + networks: + - hiy-net + # ── Reverse proxy ───────────────────────────────────────────────────────── caddy: image: docker.io/library/caddy:2-alpine @@ -170,6 +192,7 @@ networks: volumes: hiy-data: forgejo-data: + act-runner-data: caddy-data: caddy-config: hiy-pg-data: From 8561ee3e748817f38876af4cb9ea0f9bc410f6f2 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 13:47:12 +0000 Subject: [PATCH 31/41] feat: add systemd timer for automatic git pull + service restart MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit auto-update.sh fetches origin every 5 minutes. If new commits are found it pulls and selectively restarts only what changed: - server/ or Cargo.* → rebuild + restart server container - docker-compose.yml → full stack up -d - proxy/Caddyfile → caddy reload - anything else → no restart needed start.sh now installs hiy-update.service + hiy-update.timer alongside the existing hiy.service boot unit. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/auto-update.sh | 51 ++++++++++++++++++++++++++++++++++++++++++++ infra/start.sh | 34 +++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100755 infra/auto-update.sh diff --git a/infra/auto-update.sh b/infra/auto-update.sh new file mode 100755 index 0000000..862dd31 --- /dev/null +++ b/infra/auto-update.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# auto-update.sh — pull latest changes and restart affected services. +# Run by the hiy-update.timer systemd user unit every 5 minutes. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +log() { echo "[hiy-update] $(date '+%H:%M:%S') $*"; } + +cd "$REPO_ROOT" + +# Fetch without touching the working tree. 
+git fetch origin 2>&1 | sed 's/^/[git] /' || { log "git fetch failed — skipping"; exit 0; } + +LOCAL=$(git rev-parse HEAD) +REMOTE=$(git rev-parse "@{u}" 2>/dev/null || echo "$LOCAL") + +if [ "$LOCAL" = "$REMOTE" ]; then + log "Already up to date ($LOCAL)." + exit 0 +fi + +log "New commits detected — pulling ($LOCAL → $REMOTE)…" +git pull 2>&1 | sed 's/^/[git] /' + +# Determine which services need restarting based on what changed. +CHANGED=$(git diff --name-only "$LOCAL" "$REMOTE") +log "Changed files: $(echo "$CHANGED" | tr '\n' ' ')" + +# Always rebuild the server if any server-side code changed. +SERVER_CHANGED=$(echo "$CHANGED" | grep -E '^server/|^Cargo' || true) +COMPOSE_CHANGED=$(echo "$CHANGED" | grep '^infra/docker-compose' || true) +CADDY_CHANGED=$(echo "$CHANGED" | grep '^proxy/Caddyfile' || true) + +COMPOSE_CMD="podman compose --env-file $REPO_ROOT/.env -f $SCRIPT_DIR/docker-compose.yml" + +if [ -n "$COMPOSE_CHANGED" ]; then + log "docker-compose.yml changed — restarting full stack…" + $COMPOSE_CMD up -d +elif [ -n "$SERVER_CHANGED" ]; then + log "Server code changed — rebuilding server…" + $COMPOSE_CMD up -d --build server +elif [ -n "$CADDY_CHANGED" ]; then + log "Caddyfile changed — reloading Caddy…" + $COMPOSE_CMD exec caddy caddy reload --config /etc/caddy/Caddyfile --adapter caddyfile +else + log "No service restart needed for these changes." +fi + +log "Done." 
diff --git a/infra/start.sh b/infra/start.sh index cdca443..67c84ea 100755 --- a/infra/start.sh +++ b/infra/start.sh @@ -183,3 +183,37 @@ systemctl --user daemon-reload systemctl --user enable hiy.service loginctl enable-linger "$(id -un)" 2>/dev/null || true echo "[hiy] Boot service installed: systemctl --user status hiy.service" + +# ── Install systemd timer for auto-update ───────────────────────────────────── +UPDATE_SERVICE="$SERVICE_DIR/hiy-update.service" +UPDATE_TIMER="$SERVICE_DIR/hiy-update.timer" + +cat > "$UPDATE_SERVICE" < "$UPDATE_TIMER" < Date: Fri, 27 Mar 2026 13:52:28 +0000 Subject: [PATCH 32/41] feat: add install.sh for fresh Raspberry Pi setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Covers everything that was done manually on the Pi: - apt packages: podman, aardvark-dns, sqlite3, git, uidmap, python3-pip - podman-compose via pip (to ~/.local/bin) - rclone (optional, prompted) - .env creation from template with prompted values and generated passwords - git upstream tracking for auto-update - hands off to start.sh at the end Safe to re-run — all steps are idempotent. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/install.sh | 142 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100755 infra/install.sh diff --git a/infra/install.sh b/infra/install.sh new file mode 100755 index 0000000..5033afd --- /dev/null +++ b/infra/install.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash +# install.sh — one-time setup for a fresh Raspberry Pi. +# +# Run this once after cloning the repo: +# cd ~/Hostityourself && ./infra/install.sh +# +# What it does: +# 1. Installs system packages (podman, aardvark-dns, sqlite3, git, uidmap) +# 2. Installs podman-compose (via pip, into ~/.local/bin) +# 3. Installs rclone (for off-site backups — optional) +# 4. Creates .env from the template and prompts for required values +# 5. 
Runs infra/start.sh to build and launch the stack +# +# Safe to re-run — all steps are idempotent. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +log() { echo; echo "▶ $*"; } +info() { echo " $*"; } +ok() { echo " ✓ $*"; } + +echo "╔══════════════════════════════════════════╗" +echo "║ HostItYourself — installer ║" +echo "╚══════════════════════════════════════════╝" + +# ── 1. System packages ───────────────────────────────────────────────────────── +log "Installing system packages…" +sudo apt-get update -qq +sudo apt-get install -y \ + podman \ + aardvark-dns \ + sqlite3 \ + git \ + uidmap \ + python3-pip \ + python3-venv \ + curl +ok "System packages installed." + +# ── 2. podman-compose ────────────────────────────────────────────────────────── +log "Checking podman-compose…" +if command -v podman-compose &>/dev/null; then + ok "podman-compose already installed ($(podman-compose --version 2>&1 | head -1))." +else + info "Installing podman-compose via pip…" + pip3 install --user podman-compose + ok "podman-compose installed to ~/.local/bin" +fi + +# Ensure ~/.local/bin is in PATH for this session and future logins. +if ! echo "$PATH" | grep -q "$HOME/.local/bin"; then + export PATH="$HOME/.local/bin:$PATH" +fi +PROFILE="$HOME/.bashrc" +if ! grep -q '\.local/bin' "$PROFILE" 2>/dev/null; then + echo 'export PATH="$HOME/.local/bin:$PATH"' >> "$PROFILE" + info "Added ~/.local/bin to PATH in $PROFILE" +fi + +# ── 3. rclone (optional) ─────────────────────────────────────────────────────── +log "rclone (used for off-site backups)…" +if command -v rclone &>/dev/null; then + ok "rclone already installed ($(rclone --version 2>&1 | head -1))." +else + read -r -p " Install rclone? [y/N] " _RCLONE + if [[ "${_RCLONE,,}" == "y" ]]; then + curl -fsSL https://rclone.org/install.sh | sudo bash + ok "rclone installed." 
+ info "Configure a remote later with: rclone config" + info "Then set HIY_BACKUP_REMOTE in .env" + else + info "Skipped. Install later with: curl https://rclone.org/install.sh | sudo bash" + fi +fi + +# ── 4. .env setup ────────────────────────────────────────────────────────────── +log "Setting up .env…" +ENV_FILE="$REPO_ROOT/.env" +ENV_EXAMPLE="$SCRIPT_DIR/.env.example" + +if [ -f "$ENV_FILE" ]; then + ok ".env already exists — skipping (edit manually if needed)." +else + cp "$ENV_EXAMPLE" "$ENV_FILE" + info "Created .env from template. Filling in required values…" + echo + + # Helper: prompt for a value and write it into .env. + set_env() { + local key="$1" prompt="$2" default="$3" secret="${4:-}" + local current + current=$(grep "^${key}=" "$ENV_FILE" | cut -d= -f2- || echo "") + if [ -z "$current" ] || [ "$current" = "changeme" ] || [ "$current" = "" ]; then + if [ -n "$secret" ]; then + read -r -s -p " ${prompt} [${default}]: " _VAL; echo + else + read -r -p " ${prompt} [${default}]: " _VAL + fi + _VAL="${_VAL:-$default}" + # Replace the line in .env (works on both macOS and Linux). + sed -i "s|^${key}=.*|${key}=${_VAL}|" "$ENV_FILE" + fi + } + + set_env "DOMAIN_SUFFIX" "Your domain (e.g. example.com)" "yourdomain.com" + set_env "ACME_EMAIL" "Email for Let's Encrypt notices" "" + set_env "HIY_ADMIN_USER" "Dashboard admin username" "admin" + set_env "HIY_ADMIN_PASS" "Dashboard admin password" "$(openssl rand -hex 12)" "secret" + set_env "POSTGRES_PASSWORD" "Postgres admin password" "$(openssl rand -hex 16)" "secret" + set_env "FORGEJO_DB_PASSWORD" "Forgejo DB password" "$(openssl rand -hex 16)" "secret" + set_env "FORGEJO_DOMAIN" "Forgejo domain (e.g. git.example.com)" "git.yourdomain.com" + + echo + ok ".env written to $ENV_FILE" + info "Review it with: cat $ENV_FILE" +fi + +# ── 5. 
Git remote check ──────────────────────────────────────────────────────── +log "Checking git remote…" +cd "$REPO_ROOT" +REMOTE_URL=$(git remote get-url origin 2>/dev/null || echo "") +if [ -n "$REMOTE_URL" ]; then + ok "Git remote: $REMOTE_URL" +else + info "No git remote configured — auto-update will not work." + info "Set one with: git remote add origin " +fi + +# Ensure the tracking branch is set so auto-update.sh can compare commits. +BRANCH=$(git rev-parse --abbrev-ref HEAD) +if ! git rev-parse --abbrev-ref --symbolic-full-name '@{u}' &>/dev/null; then + info "Setting upstream tracking branch…" + git branch --set-upstream-to="origin/$BRANCH" "$BRANCH" 2>/dev/null || true +fi + +# ── 6. Launch the stack ──────────────────────────────────────────────────────── +log "Running start.sh to build and launch the stack…" +echo +exec "$SCRIPT_DIR/start.sh" From 99ab28d3beedd8ed5a2f54d1612d174d94e3d502 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 14:30:30 +0000 Subject: [PATCH 33/41] =?UTF-8?q?fix:=20rename=20act-runner=20to=20act=5Fr?= =?UTF-8?q?unner=20=E2=80=94=20podman-compose=20chokes=20on=20hyphens=20in?= =?UTF-8?q?=20service=20names?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 9495449..5672bb4 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -105,7 +105,7 @@ services: # ── Forgejo Actions runner ───────────────────────────────────────────────── # Obtain FORGEJO_RUNNER_TOKEN from Forgejo: # Site Administration → Actions → Runners → Create new runner - act-runner: + act_runner: image: code.forgejo.org/forgejo/act_runner:latest restart: unless-stopped environment: @@ -115,7 +115,7 @@ services: # Give the runner access to Podman so CI jobs can build/run containers. 
DOCKER_HOST: tcp://podman-proxy:2375 volumes: - - act-runner-data:/data + - act_runner_data:/data depends_on: - forgejo - podman-proxy @@ -192,7 +192,7 @@ networks: volumes: hiy-data: forgejo-data: - act-runner-data: + act_runner_data: caddy-data: caddy-config: hiy-pg-data: From f9eacd03bea2086666749a2648b9f81ee07b6256 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 14:33:54 +0000 Subject: [PATCH 34/41] fix: add container_name to act_runner to bypass podman-compose naming bug https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 5672bb4..ef17ec3 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -107,6 +107,7 @@ services: # Site Administration → Actions → Runners → Create new runner act_runner: image: code.forgejo.org/forgejo/act_runner:latest + container_name: act_runner restart: unless-stopped environment: FORGEJO_INSTANCE_URL: https://${FORGEJO_DOMAIN} From 3afdc66ec218aa266ec4c4cd3a3f746b8c7ad1a8 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 14:37:02 +0000 Subject: [PATCH 35/41] =?UTF-8?q?fix:=20correct=20Forgejo=20runner=20image?= =?UTF-8?q?=20=E2=80=94=20data.forgejo.org/forgejo/runner:6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit code.forgejo.org is the source repo, not the container registry. The OCI registry is data.forgejo.org and the image is 'runner', not 'act_runner'. 
https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index ef17ec3..7f5c4cc 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -106,7 +106,7 @@ services: # Obtain FORGEJO_RUNNER_TOKEN from Forgejo: # Site Administration → Actions → Runners → Create new runner act_runner: - image: code.forgejo.org/forgejo/act_runner:latest + image: data.forgejo.org/forgejo/runner:6 container_name: act_runner restart: unless-stopped environment: From 4ac5700ac58efec173792ad18417d15b9abdd0af Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 14:41:45 +0000 Subject: [PATCH 36/41] fix: add runner-entrypoint.sh to register Forgejo runner on first start MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The data.forgejo.org/forgejo/runner image doesn't auto-register from env vars — it needs create-runner-file called explicitly before the daemon starts. The entrypoint handles registration on first run (no /data/.runner file) then execs the daemon on all subsequent starts. 
https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 4 +++- infra/runner-entrypoint.sh | 22 ++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100755 infra/runner-entrypoint.sh diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 7f5c4cc..14757f4 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -109,14 +109,16 @@ services: image: data.forgejo.org/forgejo/runner:6 container_name: act_runner restart: unless-stopped + command: ["/entrypoint.sh"] environment: FORGEJO_INSTANCE_URL: https://${FORGEJO_DOMAIN} - FORGEJO_RUNNER_REGISTRATION_TOKEN: ${FORGEJO_RUNNER_TOKEN} + FORGEJO_RUNNER_TOKEN: ${FORGEJO_RUNNER_TOKEN} FORGEJO_RUNNER_NAME: hiy-runner # Give the runner access to Podman so CI jobs can build/run containers. DOCKER_HOST: tcp://podman-proxy:2375 volumes: - act_runner_data:/data + - ./runner-entrypoint.sh:/entrypoint.sh:ro depends_on: - forgejo - podman-proxy diff --git a/infra/runner-entrypoint.sh b/infra/runner-entrypoint.sh new file mode 100755 index 0000000..98611a3 --- /dev/null +++ b/infra/runner-entrypoint.sh @@ -0,0 +1,22 @@ +#!/bin/sh +# runner-entrypoint.sh — register the Forgejo runner on first start, then run the daemon. +# +# On first run (no /data/.runner file) it calls create-runner-file to register +# with the Forgejo instance using FORGEJO_RUNNER_TOKEN. On subsequent starts it +# goes straight to the daemon. +set -e + +CONFIG=/data/.runner + +if [ ! -f "$CONFIG" ]; then + echo "[runner] No registration found — registering with Forgejo…" + forgejo-runner create-runner-file \ + --instance "${FORGEJO_INSTANCE_URL}" \ + --secret "${FORGEJO_RUNNER_TOKEN}" \ + --name "${FORGEJO_RUNNER_NAME:-hiy-runner}" \ + --connect + echo "[runner] Registration complete." 
+fi + +echo "[runner] Starting daemon…" +exec forgejo-runner daemon --config "$CONFIG" From 868bfbc365f76a0079d5177fbbc7d28252fb4954 Mon Sep 17 00:00:00 2001 From: Shautvast Date: Fri, 27 Mar 2026 15:58:06 +0100 Subject: [PATCH 37/41] container name removed --- README.md | 3 ++- docs/setup.md | 4 +++- infra/docker-compose.yml | 1 - 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2023813..c654ca9 100644 --- a/README.md +++ b/README.md @@ -8,4 +8,5 @@ * Runs on your hardware (linux vm/host) * Integrate with git using github webhooks or add your own git remote * automatic redeployment after git push -* Builtin ssl. Automatically provisioned using let's encrypt. \ No newline at end of file +* Builtin ssl. Automatically provisioned using let's encrypt. +* Caddy reverse proxy \ No newline at end of file diff --git a/docs/setup.md b/docs/setup.md index 6db01e5..9d9bbeb 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -45,7 +45,9 @@ ssh pi@hiypi.local ```bash sudo apt update && sudo apt full-upgrade -y -sudo apt install -y git curl ufw fail2ban unattended-upgrades +sudo apt install -y git curl ufw fail2ban unattended-upgrades podman python3 pipx aardvark-dns sqlite3 +pipx install podman-compose +pipx ensurepath ``` ### Static IP (optional but recommended) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index 14757f4..e1f012c 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -107,7 +107,6 @@ services: # Site Administration → Actions → Runners → Create new runner act_runner: image: data.forgejo.org/forgejo/runner:6 - container_name: act_runner restart: unless-stopped command: ["/entrypoint.sh"] environment: From 84c36c464d901d1afb25610325588cd2e80bcddb Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 15:28:51 +0000 Subject: [PATCH 38/41] fix: use forgejo-runner register --token instead of create-runner-file --secret MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit The UI registration token is not a hex string — create-runner-file --secret expects a hex secret. Use the register subcommand with --token instead, which accepts the token from the Forgejo UI directly. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/runner-entrypoint.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/infra/runner-entrypoint.sh b/infra/runner-entrypoint.sh index 98611a3..8ddee42 100755 --- a/infra/runner-entrypoint.sh +++ b/infra/runner-entrypoint.sh @@ -10,11 +10,12 @@ CONFIG=/data/.runner if [ ! -f "$CONFIG" ]; then echo "[runner] No registration found — registering with Forgejo…" - forgejo-runner create-runner-file \ + forgejo-runner register \ --instance "${FORGEJO_INSTANCE_URL}" \ - --secret "${FORGEJO_RUNNER_TOKEN}" \ + --token "${FORGEJO_RUNNER_TOKEN}" \ --name "${FORGEJO_RUNNER_NAME:-hiy-runner}" \ - --connect + --labels "ubuntu-latest:docker://node:20-bookworm,ubuntu-22.04:docker://node:20-bookworm" \ + --no-interactive echo "[runner] Registration complete." fi From 7b37f88fb5e63692c8c707ce92d0068f78f931df Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 15:35:13 +0000 Subject: [PATCH 39/41] =?UTF-8?q?fix:=20use=20internal=20Forgejo=20URL=20f?= =?UTF-8?q?or=20act=5Frunner=20=E2=80=94=20avoids=20routing=20through=20pu?= =?UTF-8?q?blic=20IP?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The runner is on hiy-net and can reach Forgejo directly at http://forgejo:3000 rather than going out through the public IP and Caddy. 
https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infra/docker-compose.yml b/infra/docker-compose.yml index e1f012c..d8d4376 100644 --- a/infra/docker-compose.yml +++ b/infra/docker-compose.yml @@ -110,7 +110,7 @@ services: restart: unless-stopped command: ["/entrypoint.sh"] environment: - FORGEJO_INSTANCE_URL: https://${FORGEJO_DOMAIN} + FORGEJO_INSTANCE_URL: http://forgejo:3000 FORGEJO_RUNNER_TOKEN: ${FORGEJO_RUNNER_TOKEN} FORGEJO_RUNNER_NAME: hiy-runner # Give the runner access to Podman so CI jobs can build/run containers. From 2b4f06623404dd98dacc92981ea6176dd9448ccc Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 27 Mar 2026 17:52:29 +0000 Subject: [PATCH 40/41] fix: source .env at startup in backup.sh Automatically loads HIY_BACKUP_DIR, HIY_BACKUP_REMOTE, HIY_BACKUP_RETAIN_DAYS and other vars from .env so the cron job works without extra shell setup. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH --- infra/backup.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/infra/backup.sh b/infra/backup.sh index a2c601a..ecf790f 100755 --- a/infra/backup.sh +++ b/infra/backup.sh @@ -22,17 +22,19 @@ set -euo pipefail +# ── Load .env ────────────────────────────────────────────────────────────────── +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ENV_FILE="${SCRIPT_DIR}/../.env" +if [ -f "$ENV_FILE" ]; then + set -a; source "$ENV_FILE"; set +a +fi + # ── Config ───────────────────────────────────────────────────────────────────── HIY_DATA_DIR="${HIY_DATA_DIR:-/data}" BACKUP_DIR="${HIY_BACKUP_DIR:-/tmp/hiy-backups}" BACKUP_REMOTE="${HIY_BACKUP_REMOTE:-}" RETAIN_DAYS="${HIY_BACKUP_RETAIN_DAYS:-30}" -# Load .env from the repo root (one level up from infra/) so the backup cron -# can find HIY_DATA_DIR, container names, etc. without extra shell setup. 
-SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -ENV_FILE="${SCRIPT_DIR}/../.env" - TIMESTAMP=$(date +%Y%m%d-%H%M%S) ARCHIVE_NAME="hiy-backup-${TIMESTAMP}.tar.gz" STAGING="${BACKUP_DIR}/staging-${TIMESTAMP}" From 55e3f97946ee47728d0ae298b92504a378967645 Mon Sep 17 00:00:00 2001 From: Shautvast Date: Sat, 28 Mar 2026 14:35:36 +0100 Subject: [PATCH 41/41] Use patched rclone for proton drive integration --- infra/backup.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/infra/backup.sh b/infra/backup.sh index ecf790f..0d1e07c 100755 --- a/infra/backup.sh +++ b/infra/backup.sh @@ -142,7 +142,8 @@ log "Archive size: ${ARCHIVE_SIZE}" if [ -n "${BACKUP_REMOTE}" ]; then if command -v rclone &>/dev/null; then log "Uploading to ${BACKUP_REMOTE}…" - rclone copy --transfers 1 --retries 5 "${ARCHIVE_PATH}" "${BACKUP_REMOTE}/" + #use patched rclone for now + /home/sander/dev/rclone/rclone copy --transfers 1 --retries 5 "${ARCHIVE_PATH}" "${BACKUP_REMOTE}/" log "Upload complete." else log "WARNING: HIY_BACKUP_REMOTE is set but rclone is not installed — skipping"