Migrate from Fly.io to docker-server (img.pq.io)
- Add infra/docker-compose.yml, deploy-stack.sh, .env.example
- Remove Fly.io GitHub Actions workflow
- Build via EC2, deploy via Portainer + Caddy on Hetzner

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
14
.github/workflows/deploy.yml
vendored
14
.github/workflows/deploy.yml
vendored
@@ -1,14 +0,0 @@
|
||||
name: Deploy to Fly.io
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: superfly/flyctl-actions/setup-flyctl@master
|
||||
- run: flyctl deploy --remote-only
|
||||
env:
|
||||
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -4,12 +4,18 @@ node_modules/
|
||||
# Environment / secrets
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
!infra/.env.example
|
||||
infra/.deploy-secrets
|
||||
|
||||
# Logs
|
||||
npm-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
|
||||
# Claude memory
|
||||
memory/
|
||||
|
||||
# OS / editor junk
|
||||
.DS_Store
|
||||
.vscode
|
||||
|
||||
1
infra/.env.example
Normal file
1
infra/.env.example
Normal file
@@ -0,0 +1 @@
|
||||
CONVERTER_TOKEN=your-secret-token-here
|
||||
214
infra/deploy-stack.sh
Normal file
214
infra/deploy-stack.sh
Normal file
@@ -0,0 +1,214 @@
|
||||
#!/bin/bash
# Deploy a project stack to Portainer and wire it into Caddy.
#
# 1. Creates (or updates) the project's own Portainer stack
# 2. Connects Caddy to the project's network via Docker API
# 3. Writes a Caddy route snippet and reloads
#
# Zero SSH required. Copy this into each project and configure the variables below.
#
# Required env:
#   PORTAINER_URL      — e.g. https://portainer-1.docker.pq.io
#   PORTAINER_API_KEY  — from docker-server setup.sh output
#
# Optional:
#   ENV_FILE     — path to .env file (default: ./infra/.env)
#   COMPOSE_FILE — path to compose file (default: ./infra/docker-compose.yml)
#
# Usage:
#   export PORTAINER_URL=https://portainer-1.docker.pq.io
#   export PORTAINER_API_KEY=ptr_...
#   bash infra/deploy-stack.sh

set -euo pipefail

# ── PostConvert configuration ──
# ':-' substitutes when the variable is unset OR empty, so each of these is
# guaranteed non-empty from here on (the former ':?' re-checks were dead code).
STACK_NAME="${STACK_NAME:-postconvert}"
PROJECT_NETWORK="${PROJECT_NETWORK:-postconvert-net}"
CADDY_ROUTES="${CADDY_ROUTES:-$'img.pq.io {\n\treverse_proxy app:8080\n}'}"

# Hard requirements with no defaults — abort early with a clear message.
PORTAINER_URL="${PORTAINER_URL:?Set PORTAINER_URL}"
PORTAINER_API_KEY="${PORTAINER_API_KEY:?Set PORTAINER_API_KEY}"

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/.env}"
COMPOSE_FILE="${COMPOSE_FILE:-$SCRIPT_DIR/docker-compose.yml}"

# Both input files must exist before we talk to the API.
for f in "$ENV_FILE" "$COMPOSE_FILE"; do
  [ -f "$f" ] || { echo "ERROR: not found: $f" >&2; exit 1; }
done
|
||||
|
||||
API="$PORTAINER_URL/api"

# ── Get endpoint ID ──
# The '2>/dev/null || true' guards against a bad/empty API response: without
# it, pipefail + set -e would kill the script inside the command substitution
# before the friendly error below could be printed.
echo "[$STACK_NAME] Looking up Portainer endpoint..."
ENDPOINT_ID=$(curl -s "$API/endpoints" \
  -H "X-API-Key: $PORTAINER_API_KEY" | \
  python3 -c "import sys,json; print(json.load(sys.stdin)[0]['Id'])" 2>/dev/null || true)

if [ -z "$ENDPOINT_ID" ]; then
  echo "ERROR: No Portainer endpoint found" >&2
  exit 1
fi
DOCKER_API="$API/endpoints/$ENDPOINT_ID/docker"
|
||||
|
||||
# ── Helper: find container ID by name substring ──
# Prints the short (12-char) ID of the first container whose name contains
# "/<name>"; prints nothing when there is no match or the API call fails.
# The name is passed to python as argv — not interpolated into the source —
# so names containing quotes or backslashes cannot break the script.
find_container() {
  local name="$1"
  curl -s "$DOCKER_API/containers/json" \
    -H "X-API-Key: $PORTAINER_API_KEY" | \
    python3 -c "
import sys, json
needle = '/' + sys.argv[1]
for c in json.load(sys.stdin):
    for n in c.get('Names', []):
        if needle in n:
            print(c['Id'][:12])
            sys.exit(0)
" "$name" 2>/dev/null || true
}
|
||||
|
||||
# ── Helper: exec in container (detached) ──
# Runs "$@" inside the given container via the Docker exec API.
# Fire-and-forget: the exec is started detached, so its output and exit
# status are discarded.
container_exec() {
  local cid="$1"; shift

  # JSON-encode the argv with python so spaces/quotes survive intact.
  local cmd_json
  cmd_json=$(python3 -c "import json,sys; print(json.dumps(sys.argv[1:]))" "$@")

  # Step 1: create the exec instance and capture its ID.
  local exec_id
  exec_id=$(curl -s -X POST "$DOCKER_API/containers/$cid/exec" \
    -H "X-API-Key: $PORTAINER_API_KEY" \
    -H "Content-Type: application/json" \
    -d "{\"Cmd\":$cmd_json,\"Detach\":true}" | \
    python3 -c "import sys,json; print(json.load(sys.stdin)['Id'])")

  # Step 2: start it detached.
  curl -s -X POST "$DOCKER_API/exec/$exec_id/start" \
    -H "X-API-Key: $PORTAINER_API_KEY" \
    -H "Content-Type: application/json" \
    -d '{"Detach":true}' > /dev/null
}
|
||||
|
||||
# ══════════════════════════════════════════════
# 1. Deploy project stack
# ══════════════════════════════════════════════

# Build the JSON payload for the Portainer stack API on stdout.
#   $1 = "create" (payload gains "name") or "update" (payload gains "prune").
# Reads globals: COMPOSE_FILE, ENV_FILE, STACK_NAME.
build_payload() {
  local mode="$1"
  python3 - "$COMPOSE_FILE" "$ENV_FILE" "$STACK_NAME" "$mode" <<'PYEOF'
import json, re, sys

compose_file, env_file, stack_name, mode = sys.argv[1:5]

with open(compose_file) as f:
    compose = f.read()

# Parse KEY=VALUE lines from the .env file, skipping blanks and comments.
env_vars = []
with open(env_file) as f:
    for line in f:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, _, value = line.partition('=')
        if key:
            env_vars.append({"name": key, "value": value})

# Replace env_file directives with explicit environment: blocks.
# Portainer injects stack-level env vars which docker compose substitutes
# into ${VAR} expressions — each container gets all its vars this way.
def replace_env_file(m):
    indent = m.group(1)  # indentation of the env_file: key itself
    lines = [f'\n{indent}environment:']
    for var in env_vars:
        lines.append(f'{indent}  {var["name"]}: "${{{var["name"]}}}"')
    return '\n'.join(lines)

# The pattern also consumes list-form entries ("env_file:\n  - .env"):
# replacing only the key line would leave orphaned "- path" items behind
# and break the YAML. '[ \t]' (not '\s') keeps the indent capture from
# swallowing newlines.
compose = re.sub(
    r'\n([ \t]+)env_file:[^\n]*(?:\n\1[ \t]+-[^\n]*)*',
    replace_env_file,
    compose,
)

payload = {"stackFileContent": compose, "env": env_vars}
if mode == "create":
    payload["name"] = stack_name
else:
    payload["prune"] = True

json.dump(payload, sys.stdout)
PYEOF
}
|
||||
|
||||
echo "[$STACK_NAME] Checking for existing stack..."
# The stack name is passed to python as argv (not interpolated into the
# source) so names containing quotes cannot break or inject into the script.
EXISTING_ID=$(curl -s "$API/stacks" \
  -H "X-API-Key: $PORTAINER_API_KEY" | \
  python3 -c "
import sys, json
for s in json.load(sys.stdin):
    if s['Name'] == sys.argv[1]:
        print(s['Id']); break
" "$STACK_NAME" 2>/dev/null || true)

if [ -n "$EXISTING_ID" ]; then
  echo "[$STACK_NAME] Updating stack (ID: $EXISTING_ID)..."
  build_payload update | curl -s -X PUT "$API/stacks/$EXISTING_ID?endpointId=$ENDPOINT_ID" \
    -H "X-API-Key: $PORTAINER_API_KEY" \
    -H "Content-Type: application/json" \
    -d @- > /dev/null
else
  echo "[$STACK_NAME] Creating stack..."
  build_payload create | curl -s -X POST "$API/stacks/create/standalone/string?endpointId=$ENDPOINT_ID" \
    -H "X-API-Key: $PORTAINER_API_KEY" \
    -H "Content-Type: application/json" \
    -d @- > /dev/null
fi
echo "[$STACK_NAME] Stack deployed."
|
||||
|
||||
# ══════════════════════════════════════════════
# 2. Connect Caddy to project network
# ══════════════════════════════════════════════

echo "[$STACK_NAME] Connecting Caddy to $PROJECT_NETWORK..."

CADDY_ID=$(find_container "shared-caddy-1")
[ -z "$CADDY_ID" ] && CADDY_ID=$(find_container "caddy")

if [ -z "$CADDY_ID" ]; then
  echo "WARNING: Caddy container not found — skipping network + route setup."
else
  # Wait for network to appear (stack may still be starting).
  # The network name goes to python as argv, not string-interpolated source.
  NET_ID=""
  for i in $(seq 1 30); do
    NET_ID=$(curl -s "$DOCKER_API/networks" \
      -H "X-API-Key: $PORTAINER_API_KEY" | \
      python3 -c "
import sys, json
for n in json.load(sys.stdin):
    if n['Name'] == sys.argv[1]:
        print(n['Id'][:12]); break
" "$PROJECT_NETWORK" 2>/dev/null || true)
    [ -n "$NET_ID" ] && break
    sleep 1
  done

  if [ -z "$NET_ID" ]; then
    echo "WARNING: Network $PROJECT_NETWORK not found after 30s — skipping Caddy wiring."
  else
    # Connect Caddy to project network (ignore error if already connected)
    curl -s -X POST "$DOCKER_API/networks/$NET_ID/connect" \
      -H "X-API-Key: $PORTAINER_API_KEY" \
      -H "Content-Type: application/json" \
      -d "{\"Container\":\"$CADDY_ID\"}" > /dev/null 2>&1 || true
    echo "[$STACK_NAME] Caddy connected to $PROJECT_NETWORK."
  fi

  # ══════════════════════════════════════════════
  # 3. Write Caddy route snippet + reload
  # ══════════════════════════════════════════════

  echo "[$STACK_NAME] Configuring Caddy route..."
  # base64-encode the snippet so newlines/tabs/quotes survive the exec
  # round-trip inside a single-quoted shell string.
  ROUTES_B64=$(printf '%s' "$CADDY_ROUTES" | base64 | tr -d '\n')
  container_exec "$CADDY_ID" sh -c "echo '$ROUTES_B64' | base64 -d > /etc/caddy/sites/$STACK_NAME.caddy"
  # container_exec is detached: give the snippet write a moment to land
  # before asking Caddy to reload its config.
  sleep 1
  container_exec "$CADDY_ID" caddy reload --config /etc/caddy/Caddyfile
  echo "[$STACK_NAME] Caddy route configured."
fi

echo ""
echo "=== $STACK_NAME deployed ==="
echo "Check status at: $PORTAINER_URL"
|
||||
19
infra/docker-compose.yml
Normal file
19
infra/docker-compose.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
services:
  app:
    image: registry.docker.pq.io/postconvert:latest
    restart: unless-stopped
    networks:
      - postconvert-net
    environment:
      # Supplied at deploy time from infra/.env (see infra/.env.example).
      CONVERTER_TOKEN: ${CONVERTER_TOKEN}
    labels:
      # Opt this container in to automatic image updates by Watchtower.
      - "com.centurylinklabs.watchtower.enable=true"
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 1g

networks:
  postconvert-net:
    # Fixed name so deploy-stack.sh can find this network and attach Caddy.
    name: postconvert-net
|
||||
Reference in New Issue
Block a user