Hostityourself/infra/docker-compose.yml
Claude 22a6ab103c
fix: wait for Postgres to be ready before starting Forgejo
Adds a pg_isready healthcheck to the postgres service and upgrades the
Forgejo depends_on to condition: service_healthy, preventing the
"connection refused" crash on startup.

https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH
2026-03-26 11:27:10 +00:00

184 lines
6.4 KiB
YAML

# HIY — local development stack
# Run with: podman compose up --build (or: docker compose up --build)
#
# On a real Pi you would run Caddy as a systemd service; here it runs in Compose
# so you can develop without changing the host.
services:
  # ── Podman socket proxy (unix → TCP) ──────────────────────────────────────
  # start.sh exports PODMAN_SOCK before invoking compose, so the correct
  # socket is used regardless of rootful vs rootless:
  #   rootful:  /run/podman/podman.sock
  #   rootless: /run/user/<UID>/podman/podman.sock (start.sh sets this)
  podman-proxy:
    image: docker.io/alpine/socat
    command: tcp-listen:2375,fork,reuseaddr unix-connect:/podman.sock
    restart: unless-stopped
    volumes:
      - ${PODMAN_SOCK}:/podman.sock
    networks:
      - hiy-net

  # ── Control plane ─────────────────────────────────────────────────────────
  server:
    build:
      context: ..
      dockerfile: infra/Dockerfile.server
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      - hiy-data:/data
      # Mount the builder script so edits take effect without rebuilding.
      - ../builder:/app/builder:ro
    env_file:
      - path: ../.env
        required: false
    environment:
      HIY_DATA_DIR: /data
      HIY_ADDR: "0.0.0.0:3000"
      HIY_BUILD_SCRIPT: /app/builder/build.sh
      CADDY_API_URL: http://caddy:2019
      DOCKER_HOST: tcp://podman-proxy:2375
      # CONTAINER_HOST is the Podman-native equivalent of DOCKER_HOST.
      # Setting it makes `podman` automatically operate in remote mode and
      # delegate all builds/runs to the host's Podman service via the proxy,
      # instead of trying to run Podman locally inside this container (which
      # would fail: no user-namespace support in an unprivileged container).
      CONTAINER_HOST: tcp://podman-proxy:2375
      RUST_LOG: hiy_server=debug,tower_http=info
      POSTGRES_URL: postgres://hiy_admin:${POSTGRES_PASSWORD}@postgres:5432/hiy
    depends_on:
      caddy:
        condition: service_started
      podman-proxy:
        condition: service_started
      postgres:
        # service_healthy (not service_started): the server opens POSTGRES_URL
        # at startup, so it must wait for pg_isready exactly like forgejo does.
        condition: service_healthy
    networks:
      - hiy-net
      - default

  # ── Shared Postgres ───────────────────────────────────────────────────────
  postgres:
    image: docker.io/library/postgres:16-alpine
    restart: unless-stopped
    environment:
      POSTGRES_DB: hiy
      POSTGRES_USER: hiy_admin
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      FORGEJO_DB_PASSWORD: ${FORGEJO_DB_PASSWORD}
    volumes:
      - hiy-pg-data:/var/lib/postgresql/data
      # SQL files here run once on first init (ignored if data volume already exists).
      - ./postgres-init:/docker-entrypoint-initdb.d:ro
    healthcheck:
      # $$ escapes the dollar sign for Compose, so the container's own
      # POSTGRES_USER/POSTGRES_DB env vars are expanded at run time.
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - hiy-net

  # ── Forgejo (self-hosted Git) ──────────────────────────────────────────────
  forgejo:
    image: codeberg.org/forgejo/forgejo:10
    restart: unless-stopped
    environment:
      # Quoted so these stay strings in the container environment rather than
      # being parsed as YAML integers.
      USER_UID: "1000"
      USER_GID: "1000"
      FORGEJO__database__DB_TYPE: postgres
      FORGEJO__database__HOST: "postgres:5432"
      FORGEJO__database__NAME: forgejo
      FORGEJO__database__USER: forgejo
      FORGEJO__database__PASSWD: ${FORGEJO_DB_PASSWORD}
      FORGEJO__server__DOMAIN: ${FORGEJO_DOMAIN}
      FORGEJO__server__ROOT_URL: https://${FORGEJO_DOMAIN}/
      FORGEJO__server__SSH_DOMAIN: ${FORGEJO_DOMAIN}
      # Skip the first-run wizard — everything is configured via env vars above.
      FORGEJO__security__INSTALL_LOCK: "true"
    volumes:
      - forgejo-data:/data
    depends_on:
      postgres:
        condition: service_healthy
    networks:
      - hiy-net

  # ── Reverse proxy ─────────────────────────────────────────────────────────
  caddy:
    image: docker.io/library/caddy:2-alpine
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
      # Port 2019 (Caddy admin API) is intentionally NOT published to the host.
      # It is only reachable within the hiy-net Docker network (http://caddy:2019).
    env_file:
      - path: ../.env
        required: false
    volumes:
      - ../proxy/Caddyfile:/etc/caddy/Caddyfile:ro
      - caddy-data:/data
      - caddy-config:/config
    command: caddy run --config /etc/caddy/Caddyfile --adapter caddyfile
    networks:
      - hiy-net
      - default

  # ── Uptime / health checks ────────────────────────────────────────────────
  # Enable with: podman compose --profile monitoring up -d
  gatus:
    profiles: [monitoring]
    image: docker.io/twinproduction/gatus:latest
    restart: unless-stopped
    ports:
      - "8080:8080"
    volumes:
      - ./gatus.yml:/config/config.yaml:ro
    networks:
      - hiy-net

  # ── Host metrics (rootful Podman / Docker only) ───────────────────────────
  # On rootless Podman some host mounts may be unavailable; comment out if so.
  netdata:
    profiles: [monitoring]
    image: docker.io/netdata/netdata:stable
    restart: unless-stopped
    ports:
      - "19999:19999"
    pid: host
    cap_add:
      - SYS_PTRACE
      - SYS_ADMIN
    security_opt:
      - apparmor:unconfined
    volumes:
      - netdata-config:/etc/netdata
      - netdata-lib:/var/lib/netdata
      - netdata-cache:/var/cache/netdata
      - /etc/os-release:/host/etc/os-release:ro
      - /etc/passwd:/host/etc/passwd:ro
      - /etc/group:/host/etc/group:ro
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
    networks:
      - hiy-net

networks:
  hiy-net:
    # Fixed name (instead of the compose-project prefix) so deployed app
    # containers started outside this compose file can join it by name.
    # Flip to external: true if the network is created outside Compose.
    name: hiy-net
    external: false
  default: {}

volumes:
  hiy-data:
  forgejo-data:
  caddy-data:
  caddy-config:
  hiy-pg-data:
  netdata-config:
  netdata-lib:
  netdata-cache: