Two root causes: 1. Caddy was started without --resume, so every restart wiped all dynamically-registered app routes (only the base Caddyfile survived). Adding --resume makes Caddy reload its auto-saved config (stored in the caddy-config volume) which includes all app routes. 2. App routes used the container IP address, which changes whenever hiy-net is torn down and recreated by compose. Switch to the container name as the upstream dial address; Podman's aardvark-dns resolves it by name within hiy-net, so it stays valid across network recreations. Together with the existing reconnect loop in start.sh these two changes mean deployed apps survive a platform restart without needing a redeploy. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH
90 lines
3.1 KiB
YAML
# HIY — local development stack
# Run with: podman compose up --build (or: docker compose up --build)
#
# On a real Pi you would run Caddy as a systemd service; here it runs in Compose
# so you can develop without changing the host.

services:

  # ── Podman socket proxy (unix → TCP) ──────────────────────────────────────
  # start.sh exports PODMAN_SOCK before invoking compose, so the correct
  # socket is used regardless of rootful vs rootless:
  #   rootful:  /run/podman/podman.sock
  #   rootless: /run/user/<UID>/podman/podman.sock (start.sh sets this)
  podman-proxy:
    image: alpine/socat
    # Expose the host's Podman unix socket as a Docker-compatible TCP
    # endpoint (tcp://podman-proxy:2375) for the other services.
    command: tcp-listen:2375,fork,reuseaddr unix-connect:/podman.sock
    restart: unless-stopped
    volumes:
      - ${PODMAN_SOCK}:/podman.sock
    networks:
      - hiy-net
|
|
# ── Control plane ─────────────────────────────────────────────────────────
|
|
server:
|
|
build:
|
|
context: ..
|
|
dockerfile: infra/Dockerfile.server
|
|
restart: unless-stopped
|
|
ports:
|
|
- "3000:3000"
|
|
volumes:
|
|
- hiy-data:/data
|
|
# Mount the builder script so edits take effect without rebuilding.
|
|
- ../builder:/app/builder:ro
|
|
env_file:
|
|
- path: ../.env
|
|
required: false
|
|
environment:
|
|
HIY_DATA_DIR: /data
|
|
HIY_ADDR: 0.0.0.0:3000
|
|
HIY_BUILD_SCRIPT: /app/builder/build.sh
|
|
CADDY_API_URL: http://caddy:2019
|
|
DOCKER_HOST: tcp://podman-proxy:2375
|
|
# CONTAINER_HOST is the Podman-native equivalent of DOCKER_HOST.
|
|
# Setting it makes `podman` automatically operate in remote mode and
|
|
# delegate all builds/runs to the host's Podman service via the proxy,
|
|
# instead of trying to run Podman locally inside this container (which
|
|
# would fail: no user-namespace support in an unprivileged container).
|
|
CONTAINER_HOST: tcp://podman-proxy:2375
|
|
RUST_LOG: hiy_server=debug,tower_http=info
|
|
depends_on:
|
|
caddy:
|
|
condition: service_started
|
|
podman-proxy:
|
|
condition: service_started
|
|
networks:
|
|
- hiy-net
|
|
- default
|
|
|
|
# ── Reverse proxy ─────────────────────────────────────────────────────────
|
|
caddy:
|
|
image: caddy:2-alpine
|
|
restart: unless-stopped
|
|
ports:
|
|
- "80:80"
|
|
- "443:443"
|
|
# Port 2019 (Caddy admin API) is intentionally NOT published to the host.
|
|
# It is only reachable within the hiy-net Docker network (http://caddy:2019).
|
|
env_file:
|
|
- path: ../.env
|
|
required: false
|
|
volumes:
|
|
- ../proxy/Caddyfile:/etc/caddy/Caddyfile:ro
|
|
- caddy-data:/data
|
|
- caddy-config:/config
|
|
command: caddy run --config /etc/caddy/Caddyfile --adapter caddyfile --resume
|
|
networks:
|
|
- hiy-net
|
|
- default
|
|
|
|
networks:
  # Created and owned by compose (`external: false`), but pinned to a stable
  # name so deployed app containers — started outside this compose file —
  # can join it by name. (The old comment said "external", which contradicted
  # `external: false`; the fixed `name:` is what makes joining possible.)
  hiy-net:
    name: hiy-net
    external: false
volumes:
  # Control-plane state; mounted at /data in the server container.
  hiy-data:
  # Caddy's persistent /data directory.
  caddy-data:
  # Caddy's auto-saved config (/config); reloaded on start via --resume.
  caddy-config: