Compare commits

..

No commits in common. "d3ef4d2030db2b75bceb1d120e775df64a7fad25" and "54ceedbe5af108b0417431239f3a67ecc7484e7e" have entirely different histories.

6 changed files with 1 addition and 89 deletions

View file

@ -11,7 +11,3 @@ HIY_ADMIN_PASS=changeme
# Postgres admin password — used by the shared cluster.
# App schemas get their own scoped users; this password never leaves the server.
POSTGRES_PASSWORD=changeme
# Forgejo (optional — only needed if you add the forgejo service to docker-compose.yml).
FORGEJO_DB_PASSWORD=changeme
FORGEJO_DOMAIN=git.yourdomain.com

View file

@ -68,35 +68,8 @@ services:
POSTGRES_DB: hiy
POSTGRES_USER: hiy_admin
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
FORGEJO_DB_PASSWORD: ${FORGEJO_DB_PASSWORD}
volumes:
- hiy-pg-data:/var/lib/postgresql/data
# SQL files here run once on first init (ignored if data volume already exists).
- ./postgres-init:/docker-entrypoint-initdb.d:ro
networks:
- hiy-net
# ── Forgejo (self-hosted Git) ──────────────────────────────────────────────
forgejo:
image: codeberg.org/forgejo/forgejo:10
restart: unless-stopped
environment:
USER_UID: 1000
USER_GID: 1000
FORGEJO__database__DB_TYPE: postgres
FORGEJO__database__HOST: postgres:5432
FORGEJO__database__NAME: forgejo
FORGEJO__database__USER: forgejo
FORGEJO__database__PASSWD: ${FORGEJO_DB_PASSWORD}
FORGEJO__server__DOMAIN: ${FORGEJO_DOMAIN}
FORGEJO__server__ROOT_URL: https://${FORGEJO_DOMAIN}/
FORGEJO__server__SSH_DOMAIN: ${FORGEJO_DOMAIN}
# Skip the first-run wizard — everything is configured via env vars above.
FORGEJO__security__INSTALL_LOCK: "true"
volumes:
- forgejo-data:/data
depends_on:
- postgres
networks:
- hiy-net
@ -116,7 +89,7 @@ services:
- ../proxy/Caddyfile:/etc/caddy/Caddyfile:ro
- caddy-data:/data
- caddy-config:/config
command: caddy run --config /etc/caddy/Caddyfile --adapter caddyfile  →  command: caddy run --config /etc/caddy/Caddyfile --adapter caddyfile --resume
networks:
- hiy-net
- default
@ -169,7 +142,6 @@ networks:
volumes:
hiy-data:
forgejo-data:
caddy-data:
caddy-config:
hiy-pg-data:

View file

@ -1,10 +0,0 @@
#!/bin/sh
# Create a dedicated database and user for Forgejo.
# Runs once when the Postgres container is first initialised.
# FORGEJO_DB_PASSWORD must be set in the environment (via docker-compose.yml).
# NOTE(review): FORGEJO_DB_PASSWORD is expanded by the shell inside the SQL
# heredoc below with no escaping — a value containing a single quote would
# break (or alter) the CREATE USER statement. Confirm passwords are
# restricted, or escape before use.
# set -e: abort the init run on the first failing command.
set -e
# The heredoc body is sent to psql as the POSTGRES_USER superuser;
# ON_ERROR_STOP makes psql exit non-zero on any SQL error.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL
CREATE USER forgejo WITH PASSWORD '${FORGEJO_DB_PASSWORD}';
CREATE DATABASE forgejo OWNER forgejo;
EOSQL

View file

@ -31,15 +31,6 @@
reverse_proxy server:3000
}
# ── Static services (not managed by HIY) ──────────────────────────────────────
# Set FORGEJO_DOMAIN in .env (e.g. git.yourdomain.com). Falls back to a
# non-routable placeholder so Caddy starts cleanly even if Forgejo isn't used.
{$FORGEJO_DOMAIN:forgejo.localhost} {
tls {$ACME_EMAIL:internal}
reverse_proxy forgejo:3000
}
# Deployed apps are added here dynamically by hiy-server via the Caddy API.
# Each entry looks like:
#

View file

@ -154,14 +154,6 @@ async fn main() -> anyhow::Result<()> {
builder::build_worker(worker_state).await;
});
// Re-register all app Caddy routes from the DB on startup.
// Caddy no longer uses --resume, so routes must be restored each time the
// stack restarts (ensures Caddyfile changes are always picked up).
let restore_db = state.db.clone();
tokio::spawn(async move {
routes::apps::restore_caddy_routes(&restore_db).await;
});
// ── Protected routes (admin login required) ───────────────────────────────
let protected = Router::new()
.route("/", get(routes::ui::index))

View file

@ -47,35 +47,6 @@ fn caddy_route(app_host: &str, upstream: &str, is_public: bool) -> serde_json::V
}
}
/// Re-register every app's Caddy route from the database.
/// Called at startup so that removing `--resume` from Caddy doesn't lose
/// routes when the stack restarts.
/// Re-register every app's Caddy route from the database.
/// Called at startup so that removing `--resume` from Caddy doesn't lose
/// routes when the stack restarts.
///
/// Best-effort: all failures are logged via `tracing` and never propagated
/// to the caller.
pub async fn restore_caddy_routes(db: &crate::DbPool) {
// Give Caddy a moment to finish loading the Caddyfile before we PATCH it.
// Admin API endpoint is overridable via CADDY_API_URL; defaults to the
// compose-internal address.
let caddy_api = std::env::var("CADDY_API_URL").unwrap_or_else(|_| "http://caddy:2019".into());
let client = reqwest::Client::new();
// Probe the admin API up to 10 times, sleeping 2 s *before* each probe —
// so Caddy always gets at least 2 s, and at most ~20 s, to come up.
// If every probe fails we simply fall through and try the pushes anyway.
// NOTE(review): `is_ok()` only checks that the HTTP request completed;
// a non-2xx response still ends the wait early — confirm that's intended.
for attempt in 1..=10u32 {
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
if client.get(format!("{}/config/", caddy_api)).send().await.is_ok() {
break;
}
tracing::info!("restore_caddy_routes: waiting for Caddy ({}/10)…", attempt);
}
// Load every app row; on DB failure, log and bail out (startup continues).
let apps = match sqlx::query_as::<_, crate::models::App>("SELECT * FROM apps")
.fetch_all(db)
.await
{
Ok(a) => a,
Err(e) => { tracing::error!("restore_caddy_routes: DB error: {}", e); return; }
};
// Re-push each app's route; `is_public` is stored as an integer flag
// (non-zero = public).
for app in &apps {
push_visibility_to_caddy(&app.id, app.port, app.is_public != 0).await;
}
// Logged unconditionally; individual pushes above are themselves best-effort.
tracing::info!("restore_caddy_routes: registered {} app routes", apps.len());
}
/// Push a visibility change to Caddy without requiring a full redeploy.
/// Best-effort: logs a warning on failure but does not surface an error to the caller.
async fn push_visibility_to_caddy(app_id: &str, port: i64, is_public: bool) {