Hostityourself/builder/build.sh
Claude 63a1ae6065
Remove --memory limit to avoid memory.swap.max cgroup error on Pi
Raspberry Pi OS does not enable swap cgroup accounting by default.
Even --memory-swap=-1 causes runc to write "max" to memory.swap.max,
which fails with ENOENT when the file does not exist.

Removing --memory entirely means runc skips all memory.* cgroup writes.
--cpus is unaffected (uses cpu.max, which is always present).

https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH
2026-03-22 15:23:49 +00:00

224 lines
9 KiB
Bash
Executable file

#!/usr/bin/env bash
# HIY Build Engine
# Clones an app repository, builds a container image, (re)starts the app
# container, and registers a route with the Caddy admin API.
#
# Environment variables injected by hiy-server:
# APP_ID, APP_NAME, REPO_URL, BRANCH, PORT, ENV_FILE, SHA, BUILD_DIR
set -euo pipefail

# Fail fast with a clear message when a required variable is missing,
# instead of an opaque "unbound variable" error partway through a build.
# ENV_FILE and SHA are optional (the script guards them with ${VAR:-}).
: "${APP_ID:?APP_ID must be set}"
: "${APP_NAME:?APP_NAME must be set}"
: "${REPO_URL:?REPO_URL must be set}"
: "${BRANCH:?BRANCH must be set}"
: "${PORT:?PORT must be set}"
: "${BUILD_DIR:?BUILD_DIR must be set}"

# log MESSAGE... — prefix every diagnostic line so hiy-server can filter it.
log() { echo "[hiy] $*"; }

log "=== HostItYourself Build Engine ==="
log "App: $APP_NAME ($APP_ID)"
log "Repo: $REPO_URL"
log "Branch: $BRANCH"
log "Build dir: $BUILD_DIR"
# ── 1. Clone or pull ───────────────────────────────────────────────────────────
mkdir -p "$BUILD_DIR"
cd "$BUILD_DIR"
if [ -d ".git" ]; then
  log "Updating existing clone…"
  git fetch origin "$BRANCH" --depth=50
  # -B creates or resets the local branch to origin/$BRANCH in one step and
  # -f discards local modifications (e.g. leftover build artifacts), so this
  # is equivalent to the checkout + reset --hard pair but cannot be tripped
  # up by a file named like the branch, a missing local branch after a
  # shallow fetch, or a dirty working tree blocking the checkout.
  git checkout -f -B "$BRANCH" "origin/$BRANCH"
else
  log "Cloning repository…"
  git clone --depth=50 --branch "$BRANCH" "$REPO_URL" .
fi
ACTUAL_SHA=$(git rev-parse HEAD)
log "SHA: $ACTUAL_SHA"
# ── 2. Detect build strategy ──────────────────────────────────────────────────
IMAGE_TAG="hiy/${APP_ID}:${ACTUAL_SHA}"
CONTAINER_NAME="hiy-${APP_ID}"

# pack_build STRATEGY — log the strategy, verify the 'pack' CLI exists, and
# build $IMAGE_TAG with the Paketo Jammy base builder. Shared by all
# buildpack-based strategies so the check is not copy-pasted three times.
pack_build() {
  local strategy=$1
  log "Strategy: ${strategy} (Cloud Native Buildpack)"
  if ! command -v pack >/dev/null 2>&1; then
    log "ERROR: 'pack' CLI not found. Install it: https://buildpacks.io/docs/tools/pack/"
    exit 1
  fi
  pack build "$IMAGE_TAG" --builder paketobuildpacks/builder-jammy-base
}

if [ -f "Dockerfile" ]; then
  log "Strategy: Dockerfile"
  podman build --tag "$IMAGE_TAG" .
elif [ -f "package.json" ] || [ -f "yarn.lock" ]; then
  pack_build "Node.js"
elif [ -f "requirements.txt" ] || [ -f "pyproject.toml" ]; then
  pack_build "Python"
elif [ -f "go.mod" ]; then
  pack_build "Go"
elif [ -d "static" ] || [ -d "public" ]; then
  # Prefer ./public when both directories exist.
  STATIC_DIR="static"
  [ -d "public" ] && STATIC_DIR="public"
  log "Strategy: Static files (Caddy) from ./$STATIC_DIR"
  # Throwaway Dockerfile: serve the static directory with Caddy's default
  # file server (document root /srv in the official image).
  cat > Dockerfile.hiy <<EOF
FROM caddy:2-alpine
COPY $STATIC_DIR /srv
EOF
  podman build --file Dockerfile.hiy --tag "$IMAGE_TAG" .
  rm -f Dockerfile.hiy
else
  log "ERROR: Could not detect build strategy."
  log "Add a Dockerfile, package.json, requirements.txt, go.mod, or a static/ directory."
  exit 1
fi
# ── 3. Ensure Podman network ───────────────────────────────────────────────────
# Creation fails harmlessly if the network already exists.
podman network create hiy-net 2>/dev/null || true

# ── 4. Stop & remove previous container ───────────────────────────────────────
# -F -x: match the container name as a fixed, whole-line string instead of a
# regular expression, so names are never misinterpreted as patterns; '--'
# protects against a name starting with '-'.
if podman ps -a --format '{{.Names}}' | grep -Fxq -- "$CONTAINER_NAME"; then
  log "Stopping old container…"
  podman stop "$CONTAINER_NAME" >/dev/null 2>&1 || true
  podman rm "$CONTAINER_NAME" >/dev/null 2>&1 || true
fi
# ── 5. Start new container ────────────────────────────────────────────────────
log "Starting container ${CONTAINER_NAME}"

# Pass the app's env file only when it actually exists.
ENV_FILE_ARG=()
if [ -n "${ENV_FILE:-}" ] && [ -f "$ENV_FILE" ]; then
  ENV_FILE_ARG=(--env-file "$ENV_FILE")
else
  log "No env file found at '${ENV_FILE:-}'; starting without one."
fi

# Assemble the full argument list first, then launch with a single call.
# (No --memory flag: Raspberry Pi OS lacks swap cgroup accounting, and any
# memory limit makes runc write to memory.swap.max, which fails there.)
RUN_ARGS=(
  --detach
  --name "$CONTAINER_NAME"
  --network hiy-net
)
# The ${arr[@]+...} guard keeps 'set -u' happy on bash < 4.4 when the
# array is empty.
RUN_ARGS+=("${ENV_FILE_ARG[@]+"${ENV_FILE_ARG[@]}"}")
RUN_ARGS+=(
  --env "PORT=${PORT}"
  --expose "$PORT"
  --label "hiy.app=${APP_ID}"
  --label "hiy.port=${PORT}"
  --restart unless-stopped
  --cpus="0.5"
)
podman run "${RUN_ARGS[@]}" "$IMAGE_TAG"
# ── 6. Update Caddy via its admin API ─────────────────────────────────────────
# Register (or refresh) the route <APP_ID>.<DOMAIN_SUFFIX> → container IP:PORT.
# Best-effort: any failure here leaves the container running and only logs.
CADDY_API="${CADDY_API_URL:-http://localhost:2019}"
DOMAIN_SUFFIX="${DOMAIN_SUFFIX:-localhost}"
if curl --silent --fail "${CADDY_API}/config/" >/dev/null 2>&1; then
  # Resolve the container's address on hiy-net; Caddy dials it directly.
  CONTAINER_IP=$(podman inspect "$CONTAINER_NAME" \
    --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}')
  UPSTREAM="${CONTAINER_IP}:${PORT}"
  # NOTE(review): host and upstream are concatenated with no separator in
  # this log line; consider inserting " -> " between them.
  log "Updating Caddy: ${APP_ID}.${DOMAIN_SUFFIX}${UPSTREAM}"
  # Discover actual server name (Caddyfile adapter names it 'srv0', not 'hiy').
  CADDY_SERVER=$(curl --silent "${CADDY_API}/config/apps/http/servers/" 2>/dev/null | \
    python3 -c "import sys,json; d=json.load(sys.stdin); print(next(iter(d)))" 2>/dev/null || true)
  if [ -z "$CADDY_SERVER" ]; then
    log "WARNING: Could not discover Caddy server name; skipping route update."
  else
    ROUTES_URL="${CADDY_API}/config/apps/http/servers/${CADDY_SERVER}/routes"
    # Route JSON uses Caddy's forward_auth pattern:
    # 1. HIY server checks the session cookie and app-level permission at /auth/verify
    # 2. On 2xx → Caddy proxies to the app container
    # 3. On anything else (e.g. 302 redirect to /login) → Caddy passes through to the client
    # The route is built by an inline Python program; UPSTREAM and the app
    # host are passed as argv so they are never interpolated into the code.
    # ('server:3000' is dialed for the auth check — presumably the hiy-server
    # container on hiy-net; verify against the compose/unit definition.)
    ROUTE_JSON=$(python3 -c "
import json, sys
upstream = sys.argv[1]
app_host = sys.argv[2]
hiy_server = 'server:3000'
route = {
    'match': [{'host': [app_host]}],
    'handle': [{
        'handler': 'subroute',
        'routes': [{
            'handle': [{
                'handler': 'reverse_proxy',
                'rewrite': {'method': 'GET', 'uri': '/auth/verify'},
                'headers': {
                    'request': {
                        'set': {
                            'X-Forwarded-Method': ['{http.request.method}'],
                            'X-Forwarded-Uri': ['{http.request.uri}'],
                            'X-Forwarded-Host': ['{http.request.host}'],
                            'X-Forwarded-Proto': ['{http.request.scheme}'],
                        }
                    }
                },
                'upstreams': [{'dial': hiy_server}],
                'handle_response': [{
                    'match': {'status_code': [2]},
                    'routes': [{'handle': [{'handler': 'reverse_proxy', 'upstreams': [{'dial': upstream}]}]}]
                }]
            }]
        }]
    }]
}
print(json.dumps(route))
" "${UPSTREAM}" "${APP_ID}.${DOMAIN_SUFFIX}")
    # Upsert the route for this app.
    ROUTES=$(curl --silent --fail "${ROUTES_URL}" 2>/dev/null || echo "[]")
    # Remove existing route for the same host, rebuild list, keep dashboard as catch-all.
    # NOTE(review): ${APP_ID}.${DOMAIN_SUFFIX} is interpolated directly into
    # the Python source below (quoted literal) rather than passed via argv.
    UPDATED=$(echo "$ROUTES" | python3 -c "
import sys, json
try:
    routes = json.loads(sys.stdin.read())
    if not isinstance(routes, list):
        routes = []
except Exception:
    routes = []
DASHBOARD = {'handle': [{'handler': 'reverse_proxy', 'upstreams': [{'dial': 'server:3000'}]}]}
new_host = '${APP_ID}.${DOMAIN_SUFFIX}'
# Keep host-matched routes that are NOT for this app
routes = [r for r in routes
          if r.get('match') and not any(
              isinstance(m, dict) and new_host in m.get('host', [])
              for m in r.get('match', []))]
routes.insert(0, json.loads(sys.argv[1]))
routes.append(DASHBOARD)
print(json.dumps(routes))
" "$ROUTE_JSON")
    log "Upstream: ${UPSTREAM}"
    log "Routes JSON: ${UPDATED}"
    # PATCH replaces the whole routes array in one call. Disable -e around
    # the request so a Caddy failure is reported as a warning instead of
    # aborting the build after the app container is already running.
    set +e
    CADDY_RESP=$(curl --silent --show-error \
      --write-out "\nHTTP_STATUS:%{http_code}" \
      "${ROUTES_URL}" \
      --header "Content-Type: application/json" \
      --request PATCH \
      --data "$UPDATED" 2>&1)
    set -e
    # The appended HTTP_STATUS marker lets us classify success (any 2xx).
    if echo "$CADDY_RESP" | grep -q "HTTP_STATUS:2"; then
      log "Caddy updated."
    else
      log "WARNING: Caddy update failed (app is running; fix routing manually)."
      log "Caddy response: ${CADDY_RESP}"
    fi
  fi # end: CADDY_SERVER not empty
else
  # No admin API: the app still runs, but nothing routes to it publicly.
  log "Caddy admin API not reachable; skipping route update."
  log "Container ${CONTAINER_NAME} is running on port ${PORT} but not publicly routed."
  log "To reach it directly, re-run the container with a published port:"
  log " podman rm -f ${CONTAINER_NAME}"
  log " podman run -d --name ${CONTAINER_NAME} -p ${PORT}:${PORT} ${IMAGE_TAG}"
  log "Or point any reverse proxy at: \$(podman inspect ${CONTAINER_NAME} --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'):${PORT}"
fi
# ── 7. Prune old images ───────────────────────────────────────────────────────
# List every image tagged hiy/<app> with its creation time, order newest
# first, skip the three most recent, and remove the rest. Failures (e.g. an
# image still in use by a container) are ignored.
log "Pruning old images (keeping last 3)…"
podman images "hiy/${APP_ID}" --format "{{.ID}}\t{{.CreatedAt}}" \
  | sort -r -k 2 \
  | awk 'NR >= 4 {print $1}' \
  | xargs -r podman rmi 2>/dev/null || true

log "=== Build complete: $APP_NAME @ $ACTUAL_SHA ==="