Hostityourself/infra/docker-compose.yml
Claude f4aa6972e1
feat: shared Postgres with per-app schemas
One Postgres 16 instance runs in the infra stack (docker-compose).
Each app can be given its own isolated schema with a dedicated,
scoped Postgres user via the new Database card on the app detail page.

What was added:

infra/
  docker-compose.yml  — postgres:16-alpine service + hiy-pg-data
                        volume; POSTGRES_URL injected into server
  .env.example        — POSTGRES_PASSWORD entry

server/
  Cargo.toml          — sqlx postgres feature
  src/db.rs           — databases table (SQLite) migration
  src/models.rs       — Database model
  src/main.rs         — PgPool (lazy) added to AppState;
                        /api/apps/:id/database routes registered
  src/routes/mod.rs   — databases module
  src/routes/databases.rs — GET / POST / DELETE handlers:
      provision  — creates schema + scoped PG user, sets search_path,
                   injects DATABASE_URL env var
      deprovision — DROP OWNED BY + DROP ROLE + DROP SCHEMA CASCADE,
                   removes SQLite record
  src/routes/ui.rs    — app_detail queries databases table, renders
                        db_card based on provisioning state
  templates/app_detail.html — {{db_card}} placeholder +
                              provisionDb / deprovisionDb JS

Apps connect via:
  postgres://hiy-<app>:<pw>@postgres:5432/hiy
search_path is set on the role so no URL option is needed.

https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH
2026-03-24 13:16:39 +00:00

107 lines
3.7 KiB
YAML

---
# HIY — local development stack
# Run with: podman compose up --build (or: docker compose up --build)
#
# On a real Pi you would run Caddy as a systemd service; here it runs in Compose
# so you can develop without changing the host.
services:
  # ── Podman socket proxy (unix → TCP) ──────────────────────────────────────
  # start.sh exports PODMAN_SOCK before invoking compose, so the correct
  # socket is used regardless of rootful vs rootless:
  #   rootful:  /run/podman/podman.sock
  #   rootless: /run/user/<UID>/podman/podman.sock (start.sh sets this)
  podman-proxy:
    image: alpine/socat
    command: tcp-listen:2375,fork,reuseaddr unix-connect:/podman.sock
    restart: unless-stopped
    volumes:
      - ${PODMAN_SOCK}:/podman.sock
    networks:
      - hiy-net

  # ── Control plane ─────────────────────────────────────────────────────────
  server:
    build:
      context: ..
      dockerfile: infra/Dockerfile.server
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      - hiy-data:/data
      # Mount the builder script so edits take effect without rebuilding.
      - ../builder:/app/builder:ro
    env_file:
      - path: ../.env
        required: false
    environment:
      HIY_DATA_DIR: /data
      # Quoted: a plain scalar containing ":" is ambiguous YAML; the value is
      # a string either way in Compose.
      HIY_ADDR: "0.0.0.0:3000"
      HIY_BUILD_SCRIPT: /app/builder/build.sh
      CADDY_API_URL: http://caddy:2019
      DOCKER_HOST: tcp://podman-proxy:2375
      # CONTAINER_HOST is the Podman-native equivalent of DOCKER_HOST.
      # Setting it makes `podman` automatically operate in remote mode and
      # delegate all builds/runs to the host's Podman service via the proxy,
      # instead of trying to run Podman locally inside this container (which
      # would fail: no user-namespace support in an unprivileged container).
      CONTAINER_HOST: tcp://podman-proxy:2375
      RUST_LOG: hiy_server=debug,tower_http=info
      POSTGRES_URL: postgres://hiy_admin:${POSTGRES_PASSWORD}@postgres:5432/hiy
    depends_on:
      caddy:
        condition: service_started
      podman-proxy:
        condition: service_started
      # Wait for Postgres to actually accept connections (healthcheck below),
      # not merely for the container process to have started. The server's
      # PgPool is lazy, so this is belt-and-braces rather than required.
      postgres:
        condition: service_healthy
    networks:
      - hiy-net
      - default

  # ── Shared Postgres ───────────────────────────────────────────────────────
  postgres:
    image: postgres:16-alpine
    restart: unless-stopped
    environment:
      POSTGRES_DB: hiy
      POSTGRES_USER: hiy_admin
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    healthcheck:
      # pg_isready exits 0 only once the server accepts connections for the
      # given user/database — gates the server's service_healthy dependency.
      test: ["CMD-SHELL", "pg_isready -U hiy_admin -d hiy"]
      interval: 5s
      timeout: 3s
      retries: 10
    volumes:
      - hiy-pg-data:/var/lib/postgresql/data
    networks:
      - hiy-net

  # ── Reverse proxy ─────────────────────────────────────────────────────────
  caddy:
    image: caddy:2-alpine
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    # Port 2019 (Caddy admin API) is intentionally NOT published to the host.
    # It is only reachable within the hiy-net Docker network (http://caddy:2019).
    env_file:
      - path: ../.env
        required: false
    volumes:
      - ../proxy/Caddyfile:/etc/caddy/Caddyfile:ro
      - caddy-data:/data
      - caddy-config:/config
    command: caddy run --config /etc/caddy/Caddyfile --adapter caddyfile --resume
    networks:
      - hiy-net
      - default

networks:
  hiy-net:
    # Fixed name (not the project-prefixed default) so deployed app containers
    # can join it by name. Compose still creates and owns the network —
    # external: false — it just isn't namespaced per-project.
    name: hiy-net
    external: false

volumes:
  hiy-data:
  caddy-data:
  caddy-config:
  hiy-pg-data: