make build was looking for Makefile in cwd (repo root) instead of infra/. Use -C "$SCRIPT_DIR" so it always finds infra/Makefile regardless of where the script is invoked from. Add -f flag to podman compose up so it finds infra/docker-compose.yml from any working directory. https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH
106 lines
4.1 KiB
Bash
Executable file
#!/usr/bin/env bash
set -euo pipefail

# Resolve the directory that contains this script (repo/infra).
# ${BASH_SOURCE[0]} is used instead of $0 so the path is still correct if
# the script is sourced; `--` guards against paths beginning with a dash.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
# Repository root is one level above this script (repo/infra -> repo).
REPO_ROOT="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd -- "$SCRIPT_DIR"
|
|
|
|
# ── Load .env from repo root ───────────────────────────────────────────────────
# Export every variable the env file defines (set -a) so child processes
# such as podman compose see the values too.
env_file="$REPO_ROOT/.env"
if [ -f "$env_file" ]; then
  set -a
  # shellcheck disable=SC1090 — path is runtime-dependent
  . "$env_file"
  set +a
fi

# Default both settings to the empty string so `set -u` does not trip on
# them before the validation section runs.
: "${DOMAIN_SUFFIX:=}"
: "${ACME_EMAIL:=}"
|
|
|
|
# ── Validate ───────────────────────────────────────────────────────────────────
# Both settings must come from infra/.env: refuse to generate a TLS config
# for a placeholder domain, and Let's Encrypt requires a contact address.
# Diagnostics go to stderr so stdout stays clean for pipelines.
if [ -z "$DOMAIN_SUFFIX" ] || [ "$DOMAIN_SUFFIX" = "localhost" ]; then
  echo "ERROR: Set DOMAIN_SUFFIX to your real domain in infra/.env" >&2
  exit 1
fi

if [ -z "$ACME_EMAIL" ]; then
  echo "ERROR: Set ACME_EMAIL in infra/.env (required for Let's Encrypt)" >&2
  exit 1
fi
|
|
|
|
# ── Generate production caddy.json ─────────────────────────────────────────────
# Writes TLS-enabled config using Let's Encrypt (no Cloudflare required).
# Caddy will use the HTTP-01 challenge (port 80) or TLS-ALPN-01 (port 443).
# $REPO_ROOT is already resolved above; use it instead of "$SCRIPT_DIR/.."
# so the target path carries no ".." segment.
cat > "$REPO_ROOT/proxy/caddy.json" <<EOF
{
  "admin": { "listen": "0.0.0.0:2019" },
  "apps": {
    "tls": {
      "automation": {
        "policies": [{
          "subjects": ["${DOMAIN_SUFFIX}"],
          "issuers": [{"module": "acme", "email": "${ACME_EMAIL}"}]
        }]
      }
    },
    "http": {
      "servers": {
        "hiy": {
          "listen": [":80", ":443"],
          "automatic_https": {},
          "routes": [
            {
              "match": [{"host": ["${DOMAIN_SUFFIX}"]}],
              "handle": [{"handler": "reverse_proxy", "upstreams": [{"dial": "server:3000"}]}]
            }
          ]
        }
      }
    }
  }
}
EOF

echo "[hiy] Generated proxy/caddy.json for ${DOMAIN_SUFFIX}"
|
|
|
|
# ── Ensure Podman socket is active ────────────────────────────────────────────
# /run/user/<uid> is created by PAM/logind; it doesn't exist in non-login
# shells. Unconditionally redirect all Podman runtime state to /tmp so we
# never depend on logind, regardless of what XDG_RUNTIME_DIR was set to
# by the calling environment.
_HIY_RUNTIME="/tmp/podman-$(id -u)"
mkdir -p "$_HIY_RUNTIME"
# /tmp is world-writable and this path is predictable: enforce owner-only
# access, matching the 0700 mode logind gives the real XDG_RUNTIME_DIR.
# (chmod also fails loudly under set -e if another user owns the dir.)
chmod 700 "$_HIY_RUNTIME"
export XDG_RUNTIME_DIR="$_HIY_RUNTIME"
|
|
|
|
# Write storage.conf and containers.conf so Podman doesn't read stale
# RunRoot / tmp_dir values from existing user config files.
_conf_dir="$HOME/.config/containers"
mkdir -p "$_conf_dir"
printf '[storage]\ndriver = "overlay"\nrunroot = "%s/storage"\ngraphroot = "%s/.local/share/containers/storage"\n' \
  "$_HIY_RUNTIME" "$HOME" > "$_conf_dir/storage.conf"
printf '[engine]\ntmp_dir = "%s"\n' \
  "$_HIY_RUNTIME" > "$_conf_dir/containers.conf"
|
|
|
|
PODMAN_SOCK="${_HIY_RUNTIME}/podman.sock"
export PODMAN_SOCK
# Docker-API clients (podman compose among them) reach Podman through this.
export DOCKER_HOST="unix://${PODMAN_SOCK}"
if [ ! -S "$PODMAN_SOCK" ]; then
  # Fail fast with a clear message rather than a confusing "command not
  # found" surfacing from the backgrounded service below.
  command -v podman >/dev/null 2>&1 \
    || { echo "ERROR: podman is not installed or not on PATH" >&2; exit 1; }
  echo "[hiy] Starting Podman socket via podman system service…"
  # Use env to guarantee XDG_RUNTIME_DIR is correct even if the calling
  # shell environment has it set to a non-writable path.
  env XDG_RUNTIME_DIR="$_HIY_RUNTIME" \
    podman system service --time=0 "unix://${PODMAN_SOCK}" &
  # Wait up to 5 s for the socket to appear. The service runs in the
  # background, so `set -e` cannot catch its failure — this timeout does.
  for _ in 1 2 3 4 5; do
    [ -S "$PODMAN_SOCK" ] && break
    sleep 1
  done
  [ -S "$PODMAN_SOCK" ] || { echo "ERROR: Podman socket did not appear" >&2; exit 1; }
fi
|
|
|
|
# ── Build images ───────────────────────────────────────────────────────────────
# -C pins make to this script's directory so infra/Makefile is found no
# matter which working directory the script was invoked from.
make -C "$SCRIPT_DIR" build

# ── Start services (detached) ──────────────────────────────────────────────────
# -f pins the compose file the same way; --env-file feeds the repo-root
# .env values into compose variable interpolation.
podman compose --env-file "$REPO_ROOT/.env" -f "$SCRIPT_DIR/docker-compose.yml" up -d
|