# HIY — local development stack
# Run with: podman compose up --build (or: docker compose up --build)
#
# On a real Pi you would run Caddy as a systemd service; here it runs in Compose
# so you can develop without changing the host.

services:
  # ── Podman socket proxy (unix → TCP) ──────────────────────────────────────
  # start.sh exports PODMAN_SOCK before invoking compose, so the correct
  # socket is used regardless of rootful vs rootless:
  #   rootful:  /run/podman/podman.sock
  #   rootless: /run/user/<uid>/podman/podman.sock (start.sh sets this)
  podman-proxy:
    image: docker.io/alpine/socat
    command: tcp-listen:2375,fork,reuseaddr unix-connect:/podman.sock
    restart: unless-stopped
    volumes:
      - ${PODMAN_SOCK}:/podman.sock
    networks:
      - hiy-net

  # ── Control plane ─────────────────────────────────────────────────────────
  server:
    build:
      context: ..
      dockerfile: infra/Dockerfile.server
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      - hiy-data:/data
      # Mount the builder script so edits take effect without rebuilding.
      - ../builder:/app/builder:ro
    env_file:
      - path: ../.env
        required: false
    environment:
      HIY_DATA_DIR: /data
      # Quoted: a plain scalar containing ':' is a classic YAML parse trap.
      HIY_ADDR: "0.0.0.0:3000"
      HIY_BUILD_SCRIPT: /app/builder/build.sh
      CADDY_API_URL: http://caddy:2019
      DOCKER_HOST: tcp://podman-proxy:2375
      # CONTAINER_HOST is the Podman-native equivalent of DOCKER_HOST.
      # Setting it makes `podman` automatically operate in remote mode and
      # delegate all builds/runs to the host's Podman service via the proxy,
      # instead of trying to run Podman locally inside this container (which
      # would fail: no user-namespace support in an unprivileged container).
      CONTAINER_HOST: tcp://podman-proxy:2375
      RUST_LOG: hiy_server=debug,tower_http=info
      POSTGRES_URL: postgres://hiy_admin:${POSTGRES_PASSWORD}@postgres:5432/hiy
    depends_on:
      caddy:
        condition: service_started
      podman-proxy:
        condition: service_started
      # Wait until Postgres actually accepts connections, not just until the
      # container process has started — avoids connect-retry churn on boot.
      postgres:
        condition: service_healthy
    networks:
      - hiy-net
      - default

  # ── Shared Postgres ───────────────────────────────────────────────────────
  postgres:
    image: docker.io/library/postgres:16-alpine
    restart: unless-stopped
    environment:
      POSTGRES_DB: hiy
      POSTGRES_USER: hiy_admin
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      FORGEJO_DB_PASSWORD: ${FORGEJO_DB_PASSWORD}
    # Lets dependents use `condition: service_healthy` instead of racing the
    # database's startup (the container starts well before it accepts clients).
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U hiy_admin -d hiy"]
      interval: 5s
      timeout: 3s
      retries: 10
    volumes:
      - hiy-pg-data:/var/lib/postgresql/data
      # SQL files here run once on first init (ignored if data volume already exists).
      - ./postgres-init:/docker-entrypoint-initdb.d:ro
    networks:
      - hiy-net

  # ── Forgejo (self-hosted Git) ──────────────────────────────────────────────
  forgejo:
    image: codeberg.org/forgejo/forgejo:10
    restart: unless-stopped
    environment:
      # Quoted: env values are strings; don't let YAML type them as ints.
      USER_UID: "1000"
      USER_GID: "1000"
      FORGEJO__database__DB_TYPE: postgres
      FORGEJO__database__HOST: postgres:5432
      FORGEJO__database__NAME: forgejo
      FORGEJO__database__USER: forgejo
      FORGEJO__database__PASSWD: ${FORGEJO_DB_PASSWORD}
      FORGEJO__server__DOMAIN: ${FORGEJO_DOMAIN}
      FORGEJO__server__ROOT_URL: https://${FORGEJO_DOMAIN}/
      FORGEJO__server__SSH_DOMAIN: ${FORGEJO_DOMAIN}
      # Skip the first-run wizard — everything is configured via env vars above.
      FORGEJO__security__INSTALL_LOCK: "true"
      # Enable Actions.
      FORGEJO__actions__ENABLED: "true"
    volumes:
      - forgejo-data:/data
    depends_on:
      # Forgejo migrates/queries the DB on boot; wait for it to be ready.
      postgres:
        condition: service_healthy
    networks:
      - hiy-net

  # ── Forgejo Actions runner ─────────────────────────────────────────────────
  # Obtain FORGEJO_RUNNER_TOKEN from Forgejo:
  #   Site Administration → Actions → Runners → Create new runner
  act_runner:
    image: data.forgejo.org/forgejo/runner:6
    restart: unless-stopped
    command: ["/entrypoint.sh"]
    environment:
      FORGEJO_INSTANCE_URL: http://forgejo:3000
      FORGEJO_RUNNER_TOKEN: ${FORGEJO_RUNNER_TOKEN}
      FORGEJO_RUNNER_NAME: hiy-runner
      # Give the runner access to Podman so CI jobs can build/run containers.
      DOCKER_HOST: tcp://podman-proxy:2375
    volumes:
      - act_runner_data:/data
      - ./runner-entrypoint.sh:/entrypoint.sh:ro
    depends_on:
      - forgejo
      - podman-proxy
    networks:
      - hiy-net

  # ── Reverse proxy ─────────────────────────────────────────────────────────
  caddy:
    image: docker.io/library/caddy:2-alpine
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    # Port 2019 (Caddy admin API) is intentionally NOT published to the host.
    # It is only reachable within the hiy-net Docker network (http://caddy:2019).
    env_file:
      - path: ../.env
        required: false
    volumes:
      - ../proxy/Caddyfile:/etc/caddy/Caddyfile:ro
      - ../proxy/www:/srv/www:ro
      - caddy-data:/data
      - caddy-config:/config
    command: caddy run --config /etc/caddy/Caddyfile --adapter caddyfile
    networks:
      - hiy-net
      - default

  # ── Uptime / health checks ────────────────────────────────────────────────
  # Enable with: podman compose --profile monitoring up -d
  gatus:
    profiles: [monitoring]
    image: docker.io/twinproduction/gatus:latest
    restart: unless-stopped
    ports:
      - "8080:8080"
    volumes:
      - ./gatus.yml:/config/config.yaml:ro
    networks:
      - hiy-net

  # ── Host metrics (rootful Podman / Docker only) ───────────────────────────
  # On rootless Podman some host mounts may be unavailable; comment out if so.
  netdata:
    profiles: [monitoring]
    image: docker.io/netdata/netdata:stable
    restart: unless-stopped
    ports:
      - "19999:19999"
    pid: host
    cap_add:
      - SYS_PTRACE
      - SYS_ADMIN
    security_opt:
      - apparmor:unconfined
    volumes:
      - netdata-config:/etc/netdata
      - netdata-lib:/var/lib/netdata
      - netdata-cache:/var/cache/netdata
      - /etc/os-release:/host/etc/os-release:ro
      - /etc/passwd:/host/etc/passwd:ro
      - /etc/group:/host/etc/group:ro
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
    networks:
      - hiy-net

networks:
  hiy-net:
    # Fixed name (no compose-project prefix) so containers deployed outside
    # this compose file can join it by name. Compose still creates/owns it.
    name: hiy-net
    external: false
  default: {}

volumes:
  hiy-data:
  forgejo-data:
  act_runner_data:
  caddy-data:
  caddy-config:
  hiy-pg-data:
  netdata-config:
  netdata-lib:
  netdata-cache: