#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$SCRIPT_DIR"

# ── Load .env from repo root ───────────────────────────────────────────────────
if [ -f "$REPO_ROOT/.env" ]; then
  set -a; source "$REPO_ROOT/.env"; set +a
fi
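# Note: `set -a` marks every variable sourced from .env for export, so values
# such as DOMAIN_SUFFIX and ACME_EMAIL are visible to the child processes
# (make, podman compose) invoked later in this script.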

DOMAIN_SUFFIX="${DOMAIN_SUFFIX:-}"
ACME_EMAIL="${ACME_EMAIL:-}"

# ── Validate ───────────────────────────────────────────────────────────────────
if [ -z "$DOMAIN_SUFFIX" ] || [ "$DOMAIN_SUFFIX" = "localhost" ]; then
  echo "ERROR: Set DOMAIN_SUFFIX to your real domain in infra/.env"
  exit 1
fi

if [ -z "$ACME_EMAIL" ]; then
  echo "ERROR: Set ACME_EMAIL in infra/.env (required for Let's Encrypt)"
  exit 1
fi

# ── Generate production caddy.json ─────────────────────────────────────────────
# Writes TLS-enabled config using Let's Encrypt (no Cloudflare required).
# Caddy will use the HTTP-01 challenge (port 80) or TLS-ALPN-01 (port 443).
cat > "$SCRIPT_DIR/../proxy/caddy.json" <<EOF
{
  "admin": { "listen": "0.0.0.0:2019" },
  "apps": {
    "tls": {
      "automation": {
        "policies": [{
          "subjects": ["${DOMAIN_SUFFIX}"],
          "issuers": [{"module": "acme", "email": "${ACME_EMAIL}"}]
        }]
      }
    },
    "http": {
      "servers": {
        "hiy": {
          "listen": [":80", ":443"],
          "automatic_https": {},
          "routes": [
            {
              "match": [{"host": ["${DOMAIN_SUFFIX}"]}],
              "handle": [{"handler": "reverse_proxy", "upstreams": [{"dial": "server:3000"}]}]
            }
          ]
        }
      }
    }
  }
}
EOF

echo "[hiy] Generated proxy/caddy.json for ${DOMAIN_SUFFIX}"

# ── Ensure subuid/subgid entries exist for rootless Podman ────────────────────
# Rootless Podman maps UIDs/GIDs inside containers using subordinate ID ranges
# from /etc/subuid and /etc/subgid. Without a sufficient range, pulling or
# building images whose layers contain files owned by non-root UIDs/GIDs fails
# with "invalid argument" / "insufficient UIDs or GIDs in user namespace".
# Standard range: 65536 subordinate IDs starting at 100000.
_HIY_USER="$(id -un)"
_HIY_SUBID_CHANGED=0
if ! grep -q "^${_HIY_USER}:" /etc/subuid 2>/dev/null; then
  echo "${_HIY_USER}:100000:65536" | sudo tee -a /etc/subuid > /dev/null
  _HIY_SUBID_CHANGED=1
fi
if ! grep -q "^${_HIY_USER}:" /etc/subgid 2>/dev/null; then
  echo "${_HIY_USER}:100000:65536" | sudo tee -a /etc/subgid > /dev/null
  _HIY_SUBID_CHANGED=1
fi
# Migrate existing Podman storage to the new mappings when entries were added.
[ "$_HIY_SUBID_CHANGED" = "1" ] && podman system migrate

# ── Allow rootless processes to bind ports 80/443 ─────────────────────────────
# Rootless Podman cannot bind privileged ports (<1024) by default.
# Lower the threshold to 80 for this boot, and persist it across reboots.
if [ "$(sysctl -n net.ipv4.ip_unprivileged_port_start)" -gt 80 ]; then
  sudo sysctl -w net.ipv4.ip_unprivileged_port_start=80
  grep -qxF 'net.ipv4.ip_unprivileged_port_start=80' /etc/sysctl.conf 2>/dev/null \
    || echo 'net.ipv4.ip_unprivileged_port_start=80' | sudo tee -a /etc/sysctl.conf > /dev/null
fi
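# (A drop-in under /etc/sysctl.d/, e.g. 99-hiy-unprivileged-ports.conf, would be
#  an equally valid way to persist this; appending to /etc/sysctl.conf is kept
#  for simplicity.)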

# ── Ensure Podman socket is active ────────────────────────────────────────────
# Podman rootless resets XDG_RUNTIME_DIR to /run/user/<uid> if that directory
# exists (regardless of what the caller set). So we must ensure that directory
# exists and is writable by the current user — this is normally done by
# PAM/logind but doesn't happen in non-login shells.
_HIY_XDG="/run/user/$(id -u)"
if [ ! -d "$_HIY_XDG" ]; then
  sudo mkdir -p "$_HIY_XDG"
fi
if [ ! -w "$_HIY_XDG" ]; then
  sudo chown "$(id -u):$(id -g)" "$_HIY_XDG"
  sudo chmod 0700 "$_HIY_XDG"
fi
export XDG_RUNTIME_DIR="$_HIY_XDG"

PODMAN_SOCK="${_HIY_XDG}/podman.sock"
export PODMAN_SOCK
export DOCKER_HOST="unix://${PODMAN_SOCK}"
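# DOCKER_HOST points Docker-compatible clients (e.g. an external docker-compose
# acting as the compose provider) at the Podman API socket started below.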
if [ ! -S "$PODMAN_SOCK" ]; then
  echo "[hiy] Starting Podman socket via podman system service…"
  podman system service --time=0 "unix://${PODMAN_SOCK}" &
  # Wait up to 5 s for the socket to appear
  for i in 1 2 3 4 5; do
    [ -S "$PODMAN_SOCK" ] && break
    sleep 1
  done
  [ -S "$PODMAN_SOCK" ] || { echo "ERROR: Podman socket did not appear"; exit 1; }
fi
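# (Where Podman's systemd user units are available, `systemctl --user enable --now
#  podman.socket` is an alternative way to run the API service, though its default
#  socket path differs from the one used here.)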

# ── Build images ───────────────────────────────────────────────────────────────
make -C "$SCRIPT_DIR" build

# ── Start services (detached) ──────────────────────────────────────────────────
podman compose --env-file "$REPO_ROOT/.env" -f "$SCRIPT_DIR/docker-compose.yml" up -d
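# Once the stack is up, `podman ps` shows container status and `podman logs <name>`
# shows the output of an individual service.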