Replace Docker with Podman throughout

- builder/build.sh: all docker commands → podman (build, run, stop, rm,
  network create, images, rmi, inspect)
- server/src/routes/apps.rs: docker stop/restart → podman
- server/src/routes/ui.rs: docker inspect → podman
- infra/Dockerfile.server: install podman instead of docker.io
- infra/docker-compose.yml: rename docker-proxy → podman-proxy, mount
  /run/podman/podman.sock (rootful Podman socket), update DOCKER_HOST
- infra/Makefile: docker compose → podman compose

Podman is daemonless and rootless-capable (this stack mounts the rootful
socket); OCI images are identical, so no build-pipeline changes are needed
beyond renaming the CLI.

https://claude.ai/code/session_01FKCW3FDjNFj6jve4niMFXH
This commit is contained in:
Claude 2026-03-20 14:58:52 +00:00
parent 0569252edf
commit 4319b99102
No known key found for this signature in database
6 changed files with 33 additions and 29 deletions

View file

@ -35,7 +35,7 @@ CONTAINER_NAME="hiy-${APP_ID}"
if [ -f "Dockerfile" ]; then
log "Strategy: Dockerfile"
docker build --tag "$IMAGE_TAG" .
podman build --tag "$IMAGE_TAG" .
elif [ -f "package.json" ] || [ -f "yarn.lock" ]; then
log "Strategy: Node.js (Cloud Native Buildpack)"
@ -69,7 +69,7 @@ elif [ -d "static" ] || [ -d "public" ]; then
FROM caddy:2-alpine
COPY $STATIC_DIR /srv
EOF
docker build --file Dockerfile.hiy --tag "$IMAGE_TAG" .
podman build --file Dockerfile.hiy --tag "$IMAGE_TAG" .
rm -f Dockerfile.hiy
else
@ -78,14 +78,14 @@ else
exit 1
fi
# ── 3. Ensure Docker network ───────────────────────────────────────────────────
docker network create hiy-net 2>/dev/null || true
# ── 3. Ensure Podman network ───────────────────────────────────────────────────
podman network create hiy-net 2>/dev/null || true
# ── 4. Stop & remove previous container ───────────────────────────────────────
if docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
if podman ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
log "Stopping old container…"
docker stop "$CONTAINER_NAME" >/dev/null 2>&1 || true
docker rm "$CONTAINER_NAME" >/dev/null 2>&1 || true
podman stop "$CONTAINER_NAME" >/dev/null 2>&1 || true
podman rm "$CONTAINER_NAME" >/dev/null 2>&1 || true
fi
# ── 5. Start new container ────────────────────────────────────────────────────
@ -96,7 +96,7 @@ if [ -n "${ENV_FILE:-}" ] && [ -f "$ENV_FILE" ]; then
else
log "No env file found at '${ENV_FILE:-}'; starting without one."
fi
docker run --detach \
podman run --detach \
--name "$CONTAINER_NAME" \
--network hiy-net \
"${ENV_FILE_ARG[@]+"${ENV_FILE_ARG[@]}"}" \
@ -114,7 +114,7 @@ CADDY_API="${CADDY_API_URL:-http://localhost:2019}"
DOMAIN_SUFFIX="${DOMAIN_SUFFIX:-localhost}"
if curl --silent --fail "${CADDY_API}/config/" >/dev/null 2>&1; then
CONTAINER_IP=$(docker inspect "$CONTAINER_NAME" \
CONTAINER_IP=$(podman inspect "$CONTAINER_NAME" \
--format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}')
UPSTREAM="${CONTAINER_IP}:${PORT}"
log "Updating Caddy: ${APP_ID}.${DOMAIN_SUFFIX} → ${UPSTREAM}"
@ -209,17 +209,17 @@ else
log "Caddy admin API not reachable; skipping route update."
log "Container ${CONTAINER_NAME} is running on port ${PORT} but not publicly routed."
log "To reach it directly, re-run the container with a published port:"
log " docker rm -f ${CONTAINER_NAME}"
log " docker run -d --name ${CONTAINER_NAME} -p ${PORT}:${PORT} ${IMAGE_TAG}"
log "Or point any reverse proxy at: \$(docker inspect ${CONTAINER_NAME} --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'):${PORT}"
log " podman rm -f ${CONTAINER_NAME}"
log " podman run -d --name ${CONTAINER_NAME} -p ${PORT}:${PORT} ${IMAGE_TAG}"
log "Or point any reverse proxy at: \$(podman inspect ${CONTAINER_NAME} --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'):${PORT}"
fi
# ── 7. Prune old images ───────────────────────────────────────────────────────
log "Pruning old images (keeping last 3)…"
docker images "hiy/${APP_ID}" --format "{{.ID}}\t{{.CreatedAt}}" \
podman images "hiy/${APP_ID}" --format "{{.ID}}\t{{.CreatedAt}}" \
| sort --reverse --key=2 \
| tail -n +4 \
| awk '{print $1}' \
| xargs --no-run-if-empty docker rmi 2>/dev/null || true
| xargs --no-run-if-empty podman rmi 2>/dev/null || true
log "=== Build complete: $APP_NAME @ $ACTUAL_SHA ==="

View file

@ -68,7 +68,7 @@ RUN apt-get update && apt-get install -y \
curl \
bash \
python3 \
docker.io \
podman \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /usr/local/bin/hiy-server /usr/local/bin/hiy-server

View file

@ -1,6 +1,8 @@
# HIY — docker compose helpers
# HIY — podman compose helpers
# Usage: make <target>
#
# Requires: podman + podman-compose (or Docker with compose plugin as fallback)
#
# Default (make up) — auto-detects the host platform.
#
# Explicit cross-compile targets:
@ -9,7 +11,7 @@
# up-armv7 — linux/arm/v7 (Pi 2/3/4 32-bit OS)
# up-armv6 — linux/arm/v6 (Pi Zero, Pi 1)
COMPOSE = docker compose
COMPOSE = podman compose
BUILD = $(COMPOSE) build
UP = $(COMPOSE) up --build

View file

@ -1,18 +1,20 @@
# HIY — local development stack
# Run with: docker compose up --build
# Run with: podman compose up --build (or: docker compose up --build)
#
# On a real Pi you would run Caddy as a systemd service; here it runs in Compose
# so you can develop without changing the host.
services:
# ── Docker socket proxy (unix → TCP) ──────────────────────────────────────
docker-proxy:
# ── Podman socket proxy (unix → TCP) ──────────────────────────────────────
# Rootful Podman socket: /run/podman/podman.sock
# Rootless Podman socket: /run/user/<UID>/podman/podman.sock
podman-proxy:
image: alpine/socat
command: tcp-listen:2375,fork,reuseaddr unix-connect:/var/run/docker.sock
command: tcp-listen:2375,fork,reuseaddr unix-connect:/run/podman/podman.sock
restart: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /run/podman/podman.sock:/run/podman/podman.sock
networks:
- hiy-net
@ -36,12 +38,12 @@ services:
HIY_ADDR: 0.0.0.0:3000
HIY_BUILD_SCRIPT: /app/builder/build.sh
CADDY_API_URL: http://caddy:2019
DOCKER_HOST: tcp://docker-proxy:2375
DOCKER_HOST: tcp://podman-proxy:2375
RUST_LOG: hiy_server=debug,tower_http=info
depends_on:
caddy:
condition: service_started
docker-proxy:
podman-proxy:
condition: service_started
networks:
- hiy-net

View file

@ -108,7 +108,7 @@ pub async fn stop(
Path(id): Path<String>,
) -> Result<StatusCode, StatusCode> {
let name = format!("hiy-{}", id);
let out = tokio::process::Command::new("docker")
let out = tokio::process::Command::new("podman")
.args(["stop", &name])
.output()
.await
@ -116,7 +116,7 @@ pub async fn stop(
if out.status.success() {
Ok(StatusCode::NO_CONTENT)
} else {
tracing::warn!("docker stop {}: {}", name, String::from_utf8_lossy(&out.stderr));
tracing::warn!("podman stop {}: {}", name, String::from_utf8_lossy(&out.stderr));
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
@ -126,7 +126,7 @@ pub async fn restart(
Path(id): Path<String>,
) -> Result<StatusCode, StatusCode> {
let name = format!("hiy-{}", id);
let out = tokio::process::Command::new("docker")
let out = tokio::process::Command::new("podman")
.args(["restart", &name])
.output()
.await
@ -134,7 +134,7 @@ pub async fn restart(
if out.status.success() {
Ok(StatusCode::NO_CONTENT)
} else {
tracing::warn!("docker restart {}: {}", name, String::from_utf8_lossy(&out.stderr));
tracing::warn!("podman restart {}: {}", name, String::from_utf8_lossy(&out.stderr));
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}

View file

@ -144,7 +144,7 @@ async fn read_sys_stats() -> SysStats {
async fn get_container_status(app_id: &str) -> String {
let name = format!("hiy-{}", app_id);
match tokio::process::Command::new("docker")
match tokio::process::Command::new("podman")
.args(["inspect", "--format", "{{.State.Status}}", &name])
.output()
.await