ops: add new deployment and operator automation scripts
@@ -203,7 +203,8 @@ CT 2301 (besu-rpc-private-1) may fail to start with `lxc.hook.pre-start` due to
- **Daily/weekly checks:** `./scripts/maintenance/daily-weekly-checks.sh [daily|weekly|all]` — explorer sync (135), RPC health (136), config API (137). **Cron:** `./scripts/maintenance/schedule-daily-weekly-cron.sh [--install|--show]` (daily 08:00, weekly Sun 09:00). See [OPERATIONAL_RUNBOOKS.md](../docs/03-deployment/OPERATIONAL_RUNBOOKS.md) § Maintenance.
- **Start firefly-ali-1 (6201):** `./scripts/maintenance/start-firefly-6201.sh [--dry-run] [--host HOST]` — start CT 6201 on r630-02 when needed (optional ongoing).
- **Config validation (pre-deploy):** `./scripts/validation/validate-config-files.sh` — set `VALIDATE_REQUIRED_FILES` for required paths. **CI / all validation:** `./scripts/verify/run-all-validation.sh [--skip-genesis]` — dependencies, config files, **cW\* mesh matrix** (merge of `cross-chain-pmm-lps/config/deployment-status.json` and `reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json` when that file exists; no RPC), optional genesis (no LAN/SSH). **Matrix only:** `./scripts/verify/build-cw-mesh-deployment-matrix.sh` — stdout markdown; `--json-out reports/status/cw-mesh-deployment-matrix-latest.json` for machine-readable rows.
- **Config validation (pre-deploy):** `./scripts/validation/validate-config-files.sh` — set `VALIDATE_REQUIRED_FILES` for required paths. **CI / all validation:** `./scripts/verify/run-all-validation.sh [--skip-genesis] [--json-out reports/status/run-all-validation-latest.json]` — dependencies, config files, **cW\* mesh matrix** (merge of `cross-chain-pmm-lps/config/deployment-status.json` and `reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json` when that file exists; no RPC), optional genesis (no LAN/SSH). **Matrix only:** `./scripts/verify/build-cw-mesh-deployment-matrix.sh` — stdout markdown; `--json-out reports/status/cw-mesh-deployment-matrix-latest.json` for machine-readable rows.
- **Wrapper summaries:** `./scripts/run-completable-tasks-from-anywhere.sh --json-out reports/status/run-completable-tasks-latest.json`, `./scripts/run-e2e-flow-tasks-full-parallel.sh --json-out reports/status/run-e2e-flow-tasks-latest.json`, `./scripts/deployment/run-all-next-steps-chain138.sh --json-out reports/status/run-all-next-steps-chain138-latest.json`, and `./scripts/run-all-operator-tasks-from-lan.sh --json-out reports/status/run-all-operator-tasks-latest.json` produce machine-readable step summaries that match the terminal progress output.
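For example, a wrapper run with `--json-out` can be checked non-interactively roughly like this (sketch only — the summary schema is defined by `scripts/lib/run-summary.sh`, so the `.status` / `.steps[]` field names below are assumptions for illustration):

```bash
./scripts/deployment/run-all-next-steps-chain138.sh --dry-run \
  --json-out reports/status/run-all-next-steps-chain138-latest.json
# Field names assumed for illustration; match them to what run-summary.sh actually writes.
jq -r '.status, (.steps[]? | "\(.name): \(.status)")' \
  reports/status/run-all-next-steps-chain138-latest.json
```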
### 13. Phase 2, 3 & 4 Deployment Scripts
296
scripts/cloudflare/provision-d-bis-mail-dns-and-npmplus.sh
Executable file
@@ -0,0 +1,296 @@
#!/usr/bin/env bash
|
||||
# Provision mail-related DNS in Cloudflare for d-bis.org, optionally create a Cloudflare
|
||||
# Origin CA cert via API, and add (or update) the matching NPMplus proxy host + custom cert.
|
||||
#
|
||||
# Prerequisites:
|
||||
# - .env: CLOUDFLARE_API_TOKEN (or EMAIL+API_KEY) with Zone:DNS:Edit; for Origin CA also
|
||||
# SSL and Certificates (or use Global API Key for Origin CA — prefer scoped token with
|
||||
# Zone + SSL per Cloudflare dashboard).
|
||||
# - CLOUDFLARE_ZONE_ID_D_BIS_ORG
|
||||
# - NPM_URL, NPM_EMAIL, NPM_PASSWORD for NPMplus API
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/cloudflare/provision-d-bis-mail-dns-and-npmplus.sh --dry-run
|
||||
# ./scripts/cloudflare/provision-d-bis-mail-dns-and-npmplus.sh
|
||||
# PROVISION_CF_ORIGIN_CERT=1 PROVISION_NPM=1 ./scripts/cloudflare/...
|
||||
#
|
||||
# Env (optional):
|
||||
# ZONE_NAME=d-bis.org
|
||||
# MAIL_SUBDOMAIN=mail → mail.d-bis.org
|
||||
# PUBLIC_IP=76.53.10.36
|
||||
# MX_TARGET=mail.d-bis.org FQDN for MX record
|
||||
# MX_PRIORITY=10
|
||||
# SPF_TXT='v=spf1 a mx ~all' root TXT
|
||||
# DMARC_TXT='v=DMARC1; p=none; rua=mailto:postmaster@d-bis.org' (set _dmarc.d-bis.org)
|
||||
# PROVISION_MX=0 1 = set apex MX to MX_TARGET (default 0: do not change — many zones use Zoho/365)
|
||||
# PROVISION_SPF=0 1 = upsert ONE apex TXT to SPF_TXT (default 0: do not clobber Zoho/legacy TXT)
|
||||
# PROVISION_DMARC=0
|
||||
# IP_MAIL_UPSTREAM=192.168.11.32 PMG / webmail HTTP backend
|
||||
# PORT_MAIL_UPSTREAM=8006
|
||||
# MAIL_NPM_BLOCK_EXPLOITS=0 0 = ModSecurity off for finicky UIs (PMG admin)
|
||||
# PROVISION_CF_ORIGIN_CERT=0 1 = request Origin CA + optional upload to NPM
|
||||
# PROVISION_NPM=1 0 = skip NPM proxy host
|
||||
# CERT_OUT_DIR=backups/certs PEMs for Origin CA (if created)
|
||||
#
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
cd "$PROJECT_ROOT"
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
[ -f "$PROJECT_ROOT/config/ip-addresses.conf" ] && source "$PROJECT_ROOT/config/ip-addresses.conf"
|
||||
if [ -f "$PROJECT_ROOT/.env" ]; then
|
||||
set +u
|
||||
# shellcheck source=/dev/null
|
||||
source "$PROJECT_ROOT/.env"
|
||||
set -u
|
||||
fi
|
||||
|
||||
ZONE_NAME="${ZONE_NAME:-d-bis.org}"
|
||||
MAIL_SUB="${MAIL_SUBDOMAIN:-mail}"
|
||||
MAIL_FQDN="${MAIL_SUB}.${ZONE_NAME}"
|
||||
ZONE_ID="${CLOUDFLARE_ZONE_ID_D_BIS_ORG:-${CLOUDFLARE_ZONE_ID:-}}"
|
||||
PUBLIC_IP="${PUBLIC_IP:-76.53.10.36}"
|
||||
MX_TARGET="${MX_TARGET:-$MAIL_FQDN}"
|
||||
MX_PRI="${MX_PRIORITY:-10}"
|
||||
SPF_TXT="${SPF_TXT:-v=spf1 a mx ~all}"
|
||||
DMARC_TXT="${DMARC_TXT:-}"
|
||||
PROVISION_MX="${PROVISION_MX:-0}"
|
||||
PROVISION_SPF="${PROVISION_SPF:-0}"
|
||||
PROVISION_DMARC="${PROVISION_DMARC:-0}"
|
||||
MAIL_NPM_BLOCK_EXPLOITS="${MAIL_NPM_BLOCK_EXPLOITS:-0}"
|
||||
IP_MAIL_UP="${IP_MAIL_UPSTREAM:-${IP_PMG:-192.168.11.32}}"
|
||||
PORT_MAIL_UP="${PORT_MAIL_UPSTREAM:-8006}"
|
||||
PROVISION_CF_ORIGIN_CERT="${PROVISION_CF_ORIGIN_CERT:-0}"
|
||||
PROVISION_NPM="${PROVISION_NPM:-1}"
|
||||
CERT_OUT_DIR="${CERT_OUT_DIR:-$PROJECT_ROOT/backups/certs}"
|
||||
|
||||
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS:-192.168.11.167}:81}"
|
||||
# NPM creds only required when not --dry-run and PROVISION_NPM=1
|
||||
|
||||
DRY=0
|
||||
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY=1; done
|
||||
|
||||
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; BLUE='\033[0;34m'; NC='\033[0m'
|
||||
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
|
||||
log_ok() { echo -e "${GREEN}[OK]${NC} $1"; }
|
||||
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
|
||||
log_err() { echo -e "${RED}[ERR]${NC} $1"; }
|
||||
|
||||
if [ -z "$ZONE_ID" ]; then
|
||||
log_err "CLOUDFLARE_ZONE_ID_D_BIS_ORG (or CLOUDFLARE_ZONE_ID) is required"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Optional: use Global API key for DNS when API token is read-only (403 on create). Set in .env or one-shot:
|
||||
# CLOUDFLARE_DNS_PREFER_GLOBAL_KEY=1
|
||||
CLOUDFLARE_DNS_PREFER_GLOBAL_KEY="${CLOUDFLARE_DNS_PREFER_GLOBAL_KEY:-0}"
|
||||
if [ "$CLOUDFLARE_DNS_PREFER_GLOBAL_KEY" = 1 ] && [ -n "${CLOUDFLARE_EMAIL:-}" ] && [ -n "${CLOUDFLARE_API_KEY:-}" ]; then
|
||||
cf_auth=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY" -H "Content-Type: application/json")
|
||||
log_info "Using Global API Key (CLOUDFLARE_DNS_PREFER_GLOBAL_KEY=1) for Cloudflare DNS"
|
||||
elif [ -n "${CLOUDFLARE_API_TOKEN:-}" ]; then
|
||||
cf_auth=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" -H "Content-Type: application/json")
|
||||
elif [ -n "${CLOUDFLARE_EMAIL:-}" ] && [ -n "${CLOUDFLARE_API_KEY:-}" ]; then
|
||||
cf_auth=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY" -H "Content-Type: application/json")
|
||||
log_info "Using Global API Key for Cloudflare DNS"
|
||||
else
|
||||
log_err "Need CLOUDFLARE_API_TOKEN or CLOUDFLARE_EMAIL+CLOUDFLARE_API_KEY"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# No curl -f: 403/4xx return bodies; command substitution must not trip set -e inside upsert_record
|
||||
cf_post() { curl -sS -X POST "${cf_auth[@]}" --data "$2" "$1"; }
|
||||
cf_put() { curl -sS -X PUT "${cf_auth[@]}" --data "$2" "$1"; }
|
||||
|
||||
# List DNS by name+type; returns JSON result array
|
||||
cf_list_records() {
|
||||
local type="$1" name="$2"
|
||||
curl -sS -X GET "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records?type=${type}&name=${name}" \
|
||||
"${cf_auth[@]}" 2>/dev/null || echo '{"success":false}'
|
||||
}
|
||||
|
||||
# Upsert A/TXT/MX (MX uses priority in payload)
|
||||
upsert_record() {
|
||||
local rtype="$1" name="$2" content="$3" priority="${4:-}"
|
||||
local data id
|
||||
if [ "$DRY" = 1 ]; then
|
||||
log_info "[dry-run] $rtype $name → $content ${priority:+pri=$priority}"
|
||||
return 0
|
||||
fi
|
||||
local list
|
||||
list=$(cf_list_records "$rtype" "$name")
|
||||
id=$(echo "$list" | jq -r '(.result[0].id) // empty' 2>/dev/null || true)
|
||||
if [ "$rtype" = "MX" ]; then
|
||||
data=$(jq -n \
|
||||
--arg t "$rtype" --arg n "$name" --arg c "$content" --argjson p "${priority:-10}" \
|
||||
'{type:$t, name:$n, content:$c, priority:($p|tonumber), ttl:1}')
|
||||
elif [ "$rtype" = "TXT" ]; then
|
||||
data=$(jq -n --arg t "$rtype" --arg n "$name" --arg c "$content" \
|
||||
'{type:$t, name:$n, content:$c, ttl:1}')
|
||||
else
|
||||
data=$(jq -n --arg t "$rtype" --arg n "$name" --arg c "$content" \
|
||||
'{type:$t, name:$n, content:$c, ttl:1, proxied:false}')
|
||||
fi
|
||||
|
||||
if [ -n "$id" ] && [ "$id" != "null" ]; then
|
||||
local resp
|
||||
if ! resp=$(cf_put "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records/${id}" "$data"); then
|
||||
log_err "update $name: curl error"
|
||||
return 1
|
||||
fi
|
||||
echo "$resp" | jq -e '.success' >/dev/null 2>&1 && log_ok "Updated $rtype $name" && return 0
|
||||
fi
|
||||
local resp2
|
||||
if ! resp2=$(cf_post "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records" "$data"); then
|
||||
log_err "create $name: network error"
|
||||
return 1
|
||||
fi
|
||||
echo "$resp2" | jq -e '.success' >/dev/null 2>&1 && log_ok "Created $rtype $name" && return 0
|
||||
log_err "Cloudflare: $(echo "$resp2" | jq -c '.errors' 2>/dev/null || echo "$resp2")"
|
||||
log_info "403: token may lack Zone.DNS:Edit. 10000: invalid email/key or wrong account — rotate CLOUDFLARE_API_KEY, or one-shot CLOUDFLARE_DNS_PREFER_GLOBAL_KEY=1 with a valid Global Key. Or add the A in Cloudflare DNS UI."
|
||||
return 1
|
||||
}
|
||||
|
||||
log_info "Zone: $ZONE_NAME ($ZONE_ID) PUBLIC_IP=$PUBLIC_IP mail FQDN=$MAIL_FQDN"
|
||||
if [ "$PROVISION_MX" = 1 ]; then
|
||||
log_info "PROVISION_MX=1: will set MX $MX_TARGET (pri $MX_PRI) on $ZONE_NAME"
|
||||
else
|
||||
log_info "PROVISION_MX=0: skipping MX (set PROVISION_MX=1 when moving inbound mail; existing Zoho/M365 left intact)"
|
||||
fi
|
||||
|
||||
# 1) A: mail.d-bis.org (NPM/PMG web; DNS-only so SMTP can reach origin if you forward ports)
|
||||
upsert_record "A" "$MAIL_FQDN" "$PUBLIC_IP" || true
|
||||
|
||||
# 2) MX: apex → mail exchanger (optional)
|
||||
if [ "$PROVISION_MX" = 1 ]; then
|
||||
upsert_record "MX" "$ZONE_NAME" "$MX_TARGET" "$MX_PRI" || log_warn "MX upsert failed (check token permissions / duplicate)"
|
||||
fi
|
||||
|
||||
# 3) SPF (optional — default off: apex often has many TXT; upsert of first match can clobber the wrong record)
|
||||
if [ "$PROVISION_SPF" = 1 ] && [ -n "$SPF_TXT" ]; then
|
||||
log_warn "PROVISION_SPF=1: upserting apex TXT; verify this matches your mail provider (Zoho, etc.)"
|
||||
upsert_record "TXT" "$ZONE_NAME" "$SPF_TXT" || log_warn "SPF TXT failed"
|
||||
fi
|
||||
|
||||
# 4) Optional DMARC
|
||||
if [ "$PROVISION_DMARC" = 1 ] && [ -n "$DMARC_TXT" ]; then
|
||||
upsert_record "TXT" "_dmarc.$ZONE_NAME" "$DMARC_TXT" || log_warn "DMARC TXT failed"
|
||||
fi
|
||||
|
||||
# 5) Cloudflare Origin CA (for Full (strict) between CF and origin)
|
||||
ORIGIN_CERT_PEM=""; ORIGIN_KEY_PEM=""
|
||||
if [ "$PROVISION_CF_ORIGIN_CERT" = 1 ]; then
|
||||
if [ "$DRY" = 1 ]; then
|
||||
log_info "[dry-run] would POST /zones/.../origin_certificates for hostnames [$MAIL_FQDN]"
|
||||
else
|
||||
log_info "Requesting Cloudflare Origin CA certificate (API)..."
|
||||
OC_REQ=$(jq -n --arg h "$MAIL_FQDN" \
|
||||
'{hostnames:[$h], request_type:"origin-rsa", requested_validity:5475}')
|
||||
|
||||
OC_RES=$(curl -sS -X POST "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/origin_certificates" \
|
||||
"${cf_auth[@]}" -d "$OC_REQ") || true
|
||||
|
||||
if echo "$OC_RES" | jq -e '.success == true' >/dev/null 2>&1; then
|
||||
ORIGIN_CERT_PEM=$(echo "$OC_RES" | jq -r '.result.certificate // empty')
|
||||
ORIGIN_KEY_PEM=$(echo "$OC_RES" | jq -r '.result.private_key // empty')
|
||||
mkdir -p "$CERT_OUT_DIR"
|
||||
umask 077
|
||||
echo "$ORIGIN_CERT_PEM" > "$CERT_OUT_DIR/${MAIL_SUB}-origin-${ZONE_NAME}.fullchain.pem"
|
||||
echo "$ORIGIN_KEY_PEM" > "$CERT_OUT_DIR/${MAIL_SUB}-origin-${ZONE_NAME}.key.pem"
|
||||
log_ok "Wrote $CERT_OUT_DIR/${MAIL_SUB}-origin-${ZONE_NAME}.{fullchain.pem,key.pem}"
|
||||
log_warn "Set Cloudflare SSL mode to Full (strict) for $ZONE_NAME when using Origin CA on NPM."
|
||||
else
|
||||
log_err "Origin CA API failed: $(echo "$OC_RES" | jq -c '{success,errors}')"
|
||||
log_info "If token lacks SSL:Edit, create Origin CA in Cloudflare UI → SSL/TLS → Origin Server → Create."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# 6) NPMplus: proxy host + custom cert
|
||||
if [ "$PROVISION_NPM" != 1 ]; then
|
||||
log_ok "Done (Cloudflare DNS${PROVISION_CF_ORIGIN_CERT:+/Origin CA} only; set PROVISION_NPM=1 for NPMplus)."
|
||||
exit 0
|
||||
fi
|
||||
if [ "$DRY" = 1 ]; then
|
||||
log_info "[dry-run] NPM: would ensure proxy host $MAIL_FQDN → http://${IP_MAIL_UP}:${PORT_MAIL_UP}"
|
||||
log_ok "Done."
|
||||
exit 0
|
||||
fi
|
||||
if [ -z "${NPM_EMAIL:-}" ] || [ -z "${NPM_PASSWORD:-}" ]; then
|
||||
log_err "Set NPM_EMAIL and NPM_PASSWORD in .env for PROVISION_NPM=1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
curl_npm() { curl -s -k -L --connect-timeout 10 --max-time 120 "$@"; }
|
||||
AUTH_JSON=$(jq -n --arg identity "$NPM_EMAIL" --arg secret "$NPM_PASSWORD" '{identity:$identity,secret:$secret}')
|
||||
TR=$(curl_npm -X POST "$NPM_URL/api/tokens" -H "Content-Type: application/json" -d "$AUTH_JSON")
|
||||
TOK=$(echo "$TR" | jq -r '.token // empty')
|
||||
if [ -z "$TOK" ] || [ "$TOK" = "null" ]; then
|
||||
log_err "NPM auth failed: $(echo "$TR" | jq -c .)"
|
||||
exit 1
|
||||
fi
|
||||
log_ok "NPMplus authenticated"
|
||||
|
||||
NPM_BLOCK_JSON="$([ "$MAIL_NPM_BLOCK_EXPLOITS" = 1 ] && echo true || echo false)"
|
||||
|
||||
# Find existing host for this domain
|
||||
HOSTS=$(curl_npm -X GET "$NPM_URL/api/nginx/proxy-hosts" -H "Authorization: Bearer $TOK" -H "Content-Type: application/json")
|
||||
HOST_ID=$(echo "$HOSTS" | jq -r --arg d "$MAIL_FQDN" '.[] | select((.domain_names|index($d)) != null) | .id' 2>/dev/null | head -1)
|
||||
CERT_ID_TO_USE="null"
|
||||
|
||||
if [ -n "$ORIGIN_CERT_PEM" ] && [ -n "$ORIGIN_KEY_PEM" ]; then
|
||||
log_info "Uploading custom certificate to NPMplus..."
|
||||
# PEM in --arg (NPM/zoeyvid: provider other + certificate + certificate_key; large tokens OK in modern bash)
|
||||
CERT_JSON=$(jq -n \
|
||||
--arg cert "$ORIGIN_CERT_PEM" --arg key "$ORIGIN_KEY_PEM" --arg m "$MAIL_FQDN" \
|
||||
'{provider:"other", nice_name:("Cloudflare Origin: "+$m), domain_names:[$m], certificate:$cert, certificate_key:$key, meta:{}}' )
|
||||
|
||||
CR=$(curl_npm -X POST "$NPM_URL/api/nginx/certificates" -H "Authorization: Bearer $TOK" -H "Content-Type: application/json" -d "$CERT_JSON")
|
||||
CERT_ID_TO_USE=$(echo "$CR" | jq -r '.id // empty')
|
||||
if [ -n "$CERT_ID_TO_USE" ] && [ "$CERT_ID_TO_USE" != "null" ]; then
|
||||
log_ok "NPM certificate id: $CERT_ID_TO_USE"
|
||||
else
|
||||
log_warn "NPM custom cert upload failed: $(echo "$CR" | jq -c .) — set cert in UI from $CERT_OUT_DIR"
|
||||
CERT_ID_TO_USE="null"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create or update proxy host
|
||||
if [ -z "$HOST_ID" ] || [ "$HOST_ID" = "null" ]; then
|
||||
log_info "Creating proxy host $MAIL_FQDN → http://${IP_MAIL_UP}:${PORT_MAIL_UP} ..."
|
||||
if [ -n "$CERT_ID_TO_USE" ] && [ "$CERT_ID_TO_USE" != "null" ]; then
|
||||
PH=$(jq -n --arg d "$MAIL_FQDN" --arg h "$IP_MAIL_UP" --argjson p "$PORT_MAIL_UP" --argjson be "$NPM_BLOCK_JSON" --argjson cid "$CERT_ID_TO_USE" \
|
||||
'{domain_names:[$d], forward_scheme:"http", forward_host:$h, forward_port:$p, allow_websocket_upgrade:true, block_exploits:$be, certificate_id:$cid, ssl_forced:true, http2_support:true, hsts_enabled:true, hsts_subdomains:false}')
|
||||
else
|
||||
PH=$(jq -n --arg d "$MAIL_FQDN" --arg h "$IP_MAIL_UP" --argjson p "$PORT_MAIL_UP" --argjson be "$NPM_BLOCK_JSON" \
|
||||
'{domain_names:[$d], forward_scheme:"http", forward_host:$h, forward_port:$p, allow_websocket_upgrade:true, block_exploits:$be, certificate_id:null, ssl_forced:false}')
|
||||
fi
|
||||
|
||||
PR=$(curl_npm -X POST "$NPM_URL/api/nginx/proxy-hosts" -H "Authorization: Bearer $TOK" -H "Content-Type: application/json" -d "$PH")
|
||||
NEW_ID=$(echo "$PR" | jq -r '.id // empty')
|
||||
if [ -n "$NEW_ID" ] && [ "$NEW_ID" != "null" ]; then
|
||||
log_ok "Proxy host id $NEW_ID — test https://$MAIL_FQDN"
|
||||
else
|
||||
log_err "Create host failed: $(echo "$PR" | jq -c .)"
|
||||
fi
|
||||
else
|
||||
log_info "Updating proxy host id $HOST_ID"
|
||||
if [ -n "$CERT_ID_TO_USE" ] && [ "$CERT_ID_TO_USE" != "null" ]; then
|
||||
PAYLOAD=$(curl_npm -X GET "$NPM_URL/api/nginx/proxy-hosts/$HOST_ID" -H "Authorization: Bearer $TOK" | jq \
|
||||
--arg h "$IP_MAIL_UP" --argjson p "$PORT_MAIL_UP" --argjson be "$NPM_BLOCK_JSON" --argjson cid "$CERT_ID_TO_USE" \
|
||||
'.forward_host=$h | .forward_port=$p | .forward_scheme="http" | .block_exploits=$be | .certificate_id=$cid | .ssl_forced=true | .http2_support=true')
|
||||
else
|
||||
PAYLOAD=$(curl_npm -X GET "$NPM_URL/api/nginx/proxy-hosts/$HOST_ID" -H "Authorization: Bearer $TOK" | jq \
|
||||
--arg h "$IP_MAIL_UP" --argjson p "$PORT_MAIL_UP" --argjson be "$NPM_BLOCK_JSON" \
|
||||
'.forward_host=$h | .forward_port=$p | .forward_scheme="http" | .block_exploits=$be')
|
||||
fi
|
||||
PUR=$(curl_npm -X PUT "$NPM_URL/api/nginx/proxy-hosts/$HOST_ID" -H "Authorization: Bearer $TOK" -H "Content-Type: application/json" -d "$PAYLOAD")
|
||||
if echo "$PUR" | jq -e '.id' >/dev/null 2>&1; then
|
||||
log_ok "Proxy host $HOST_ID updated"
|
||||
else
|
||||
log_warn "Update: $(echo "$PUR" | jq -c .)"
|
||||
fi
|
||||
fi
|
||||
|
||||
log_ok "Done. UDM/Cloudflare: ensure SMTP(25/587) and HTTPS forward to this stack as intended; NPM only handles HTTP/HTTPS to the backend."
@@ -10,6 +10,7 @@ set -euo pipefail
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
|
||||
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
|
||||
|
||||
SSH_OPTS=(-o ConnectTimeout=20 -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new)
|
||||
|
||||
@@ -27,14 +28,18 @@ if [[ ! -f "$STATIC" ]] || [[ ! -f "$PERMS" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# VMID -> Proxmox host (per BESU_VMIDS_FROM_PROXMOX / list-besu-vmids-from-proxmox.sh)
|
||||
declare -A HOST_BY_VMID
|
||||
# r630-01 (192.168.11.11) — 2500-2505 removed (destroyed; see ALL_VMIDS_ENDPOINTS.md)
|
||||
for v in 1000 1001 1002 1500 1501 1502 2101; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done
|
||||
# r630-02 (192.168.11.12)
|
||||
for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"; done
|
||||
# ml110 (192.168.11.10)
|
||||
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2307 2308 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
|
||||
host_for_vmid() {
|
||||
local vmid="$1"
|
||||
if type get_host_for_vmid >/dev/null 2>&1; then
|
||||
get_host_for_vmid "$vmid"
|
||||
elif [[ "$vmid" -le 1002 ]] || [[ "$vmid" == "1500" ]] || [[ "$vmid" == "1501" ]] || [[ "$vmid" == "1502" ]] || [[ "$vmid" == "2101" ]]; then
|
||||
echo "${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
|
||||
elif [[ "$vmid" == "2201" ]] || [[ "$vmid" == "2303" ]] || [[ "$vmid" == "2401" ]]; then
|
||||
echo "${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
|
||||
else
|
||||
echo "${PROXMOX_HOST_ML110:-192.168.11.10}"
|
||||
fi
|
||||
}
|
||||
|
||||
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403)
|
||||
|
||||
@@ -46,7 +51,7 @@ echo ""
|
||||
# Group by host to minimize scp/ssh
|
||||
declare -A VMIDS_ON_HOST
|
||||
for vmid in "${BESU_VMIDS[@]}"; do
|
||||
host="${HOST_BY_VMID[$vmid]:-}"
|
||||
host="$(host_for_vmid "$vmid")"
|
||||
[[ -z "$host" ]] && continue
|
||||
VMIDS_ON_HOST[$host]+=" $vmid"
|
||||
done
|
||||
|
||||
@@ -11,6 +11,7 @@ DEPLOY_ROOT="${DEPLOY_ROOT:-/var/www/atomic-swap}"
|
||||
TMP_ARCHIVE="/tmp/atomic-swap-dapp-5801.tgz"
|
||||
DIST_DIR="$SUBMODULE_ROOT/dist"
|
||||
SKIP_BUILD="${SKIP_BUILD:-0}"
|
||||
SSH_OPTS="${SSH_OPTS:--o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new}"
|
||||
|
||||
cleanup() {
|
||||
rm -f "$TMP_ARCHIVE"
|
||||
@@ -25,6 +26,11 @@ fi
|
||||
|
||||
cd "$SUBMODULE_ROOT"
|
||||
if [ "$SKIP_BUILD" != "1" ]; then
|
||||
if [ -f package-lock.json ]; then
|
||||
npm ci >/dev/null
|
||||
else
|
||||
npm install >/dev/null
|
||||
fi
|
||||
npm run sync:ecosystem >/dev/null
|
||||
npm run validate:manifest >/dev/null
|
||||
npm run build >/dev/null
|
||||
@@ -51,9 +57,10 @@ jq -e '.networks[] | select(.chainId == 138) | .venueCounts.deployedVenuePoolCou
|
||||
rm -f "$TMP_ARCHIVE"
|
||||
tar -C "$SUBMODULE_ROOT" -czf "$TMP_ARCHIVE" dist
|
||||
|
||||
scp -q -o StrictHostKeyChecking=accept-new "$TMP_ARCHIVE" "root@$PROXMOX_HOST:/tmp/atomic-swap-dapp-5801.tgz"
|
||||
ssh $SSH_OPTS "root@$PROXMOX_HOST" true
|
||||
scp -q $SSH_OPTS "$TMP_ARCHIVE" "root@$PROXMOX_HOST:/tmp/atomic-swap-dapp-5801.tgz"
|
||||
|
||||
ssh -o StrictHostKeyChecking=accept-new "root@$PROXMOX_HOST" "
|
||||
ssh $SSH_OPTS "root@$PROXMOX_HOST" "
|
||||
pct push $VMID /tmp/atomic-swap-dapp-5801.tgz /tmp/atomic-swap-dapp-5801.tgz
|
||||
pct exec $VMID -- bash -lc '
|
||||
set -euo pipefail
|
||||
|
||||
336
scripts/deployment/deploy-currencicombo-8604.sh
Executable file
@@ -0,0 +1,336 @@
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
|
||||
|
||||
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
|
||||
PROXMOX_USER="${PROXMOX_USER:-root}"
|
||||
VMID="${VMID:-8604}"
|
||||
CT_HOSTNAME="${CT_HOSTNAME:-currencicombo-phoenix-1}"
|
||||
CT_IP="${CT_IP:-10.160.0.14}"
|
||||
CT_PREFIX="${CT_PREFIX:-22}"
|
||||
CT_GW="${CT_GW:-10.160.0.1}"
|
||||
CT_VLAN_TAG="${CT_VLAN_TAG:-160}"
|
||||
CT_STORAGE="${CT_STORAGE:-thin1}"
|
||||
CT_TEMPLATE="${CT_TEMPLATE:-ubuntu-22.04-standard_22.04-1_amd64.tar.zst}"
|
||||
CT_MEMORY_MB="${CT_MEMORY_MB:-6144}"
|
||||
CT_CORES="${CT_CORES:-4}"
|
||||
CT_ROOTFS_GB="${CT_ROOTFS_GB:-40}"
|
||||
CT_SWAP_MB="${CT_SWAP_MB:-1024}"
|
||||
CT_TIMEZONE="${CT_TIMEZONE:-America/Los_Angeles}"
|
||||
CT_NODE_ENV="${CT_NODE_ENV:-production}"
|
||||
DEPLOY_ROOT="${DEPLOY_ROOT:-/opt/currencicombo}"
|
||||
REPO_URL="${REPO_URL:-https://gitea.d-bis.org/d-bis/CurrenciCombo.git}"
|
||||
REPO_BRANCH="${REPO_BRANCH:-main}"
|
||||
REPO_REF="${REPO_REF:-}"
|
||||
LOCAL_SRC="${CURRENCICOMBO_SRC:-}"
|
||||
POSTGRES_DB="${POSTGRES_DB:-comboflow}"
|
||||
POSTGRES_USER="${POSTGRES_USER:-comboflow}"
|
||||
POSTGRES_PASSWORD="${POSTGRES_PASSWORD:-comboflow-prod-please-rotate}"
|
||||
REDIS_URL="${REDIS_URL:-redis://127.0.0.1:6379}"
|
||||
ORCH_PORT="${ORCH_PORT:-8080}"
|
||||
WEB_PORT="${WEB_PORT:-3000}"
|
||||
NEXTAUTH_URL="${NEXTAUTH_URL:-http://${CT_IP}:${WEB_PORT}}"
|
||||
NEXT_PUBLIC_ORCH_URL="${NEXT_PUBLIC_ORCH_URL:-http://${CT_IP}:${ORCH_PORT}}"
|
||||
SESSION_SECRET="${SESSION_SECRET:-currencicombo-session-secret-change-me-32chars}"
|
||||
JWT_SECRET="${JWT_SECRET:-currencicombo-jwt-secret-change-me-32chars}"
|
||||
API_KEYS="${API_KEYS:-currencicombo-phoenix-dev-key}"
|
||||
SSH_OPTS=(-o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)
|
||||
DRY_RUN=0
|
||||
APPLY=0
|
||||
SKIP_CREATE=0
|
||||
SKIP_SYSTEM_PACKAGES=0
|
||||
|
||||
TMP_ARCHIVE="$(mktemp /tmp/currencicombo-8604-XXXXXX.tgz)"
|
||||
REMOTE_ARCHIVE="/tmp/$(basename "$TMP_ARCHIVE")"
|
||||
PUSH_ARCHIVE="/root/$(basename "$TMP_ARCHIVE")"
|
||||
|
||||
usage() {
|
||||
cat <<USAGE
|
||||
Usage: $(basename "$0") [--dry-run] [--apply] [--skip-create] [--skip-system-packages]
|
||||
|
||||
Deploy CurrenciCombo to Phoenix CT ${VMID} on ${PROXMOX_HOST}.
|
||||
|
||||
Options:
|
||||
--dry-run Print planned actions without changing anything.
|
||||
--apply Execute the deployment.
|
||||
--skip-create Assume the CT already exists; do not create it.
|
||||
--skip-system-packages Skip apt/node/postgres/redis installation inside the CT.
|
||||
USAGE
|
||||
}
|
||||
|
||||
log() { echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*"; }
|
||||
fail() { echo "ERROR: $*" >&2; exit 1; }
|
||||
|
||||
run_local() {
|
||||
if [[ "$DRY_RUN" == "1" ]]; then
|
||||
printf '[dry-run] '
|
||||
printf '%q ' "$@"
|
||||
printf '\n'
|
||||
else
|
||||
"$@"
|
||||
fi
|
||||
}
|
||||
|
||||
run_remote() {
|
||||
local cmd="$1"
|
||||
if [[ "$DRY_RUN" == "1" ]]; then
|
||||
echo "[dry-run] ssh ${PROXMOX_USER}@${PROXMOX_HOST} $cmd"
|
||||
else
|
||||
ssh "${SSH_OPTS[@]}" "${PROXMOX_USER}@${PROXMOX_HOST}" "$cmd"
|
||||
fi
|
||||
}
|
||||
|
||||
cleanup() {
|
||||
rm -f "$TMP_ARCHIVE"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--dry-run) DRY_RUN=1 ;;
|
||||
--apply) APPLY=1 ;;
|
||||
--skip-create) SKIP_CREATE=1 ;;
|
||||
--skip-system-packages) SKIP_SYSTEM_PACKAGES=1 ;;
|
||||
-h|--help) usage; exit 0 ;;
|
||||
*) fail "Unknown argument: $1" ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if [[ "$DRY_RUN" == "0" && "$APPLY" == "0" ]]; then
|
||||
fail "Refusing to make changes without --apply. Use --dry-run to preview."
|
||||
fi
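# Example invocations (sketch; only the flags documented in usage() above):
#   ./scripts/deployment/deploy-currencicombo-8604.sh --dry-run
#   ./scripts/deployment/deploy-currencicombo-8604.sh --apply --skip-create --skip-system-packages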
build_archive_from_local() {
|
||||
local src="$1"
|
||||
[[ -d "$src" ]] || fail "Local source repo not found: $src"
|
||||
log "Packing local source from $src"
|
||||
tar -C "$src" \
|
||||
--exclude='.git' \
|
||||
--exclude='node_modules' \
|
||||
--exclude='webapp/node_modules' \
|
||||
--exclude='orchestrator/node_modules' \
|
||||
--exclude='contracts/node_modules' \
|
||||
--exclude='webapp/.next' \
|
||||
--exclude='orchestrator/dist' \
|
||||
--exclude='.env' \
|
||||
--exclude='.env.local' \
|
||||
--exclude='webapp/.env.local' \
|
||||
-czf "$TMP_ARCHIVE" .
|
||||
}
|
||||
|
||||
ensure_archive() {
|
||||
if [[ -n "$LOCAL_SRC" ]]; then
|
||||
build_archive_from_local "$LOCAL_SRC"
|
||||
return
|
||||
fi
|
||||
|
||||
local scratch
|
||||
scratch="$(mktemp -d /tmp/currencicombo-src-XXXXXX)"
|
||||
trap 'rm -rf "$scratch"; cleanup' EXIT
|
||||
log "Cloning $REPO_URL#$REPO_BRANCH to build deploy archive"
|
||||
git clone --depth=1 --branch "$REPO_BRANCH" "$REPO_URL" "$scratch/repo" >/dev/null 2>&1
|
||||
if [[ -n "$REPO_REF" ]]; then
|
||||
git -C "$scratch/repo" fetch --depth=1 origin "$REPO_REF" >/dev/null 2>&1 || true
|
||||
git -C "$scratch/repo" checkout "$REPO_REF" >/dev/null 2>&1
|
||||
fi
|
||||
build_archive_from_local "$scratch/repo"
|
||||
}
|
||||
|
||||
ensure_ct() {
|
||||
local exists_cmd="pct status ${VMID} >/dev/null 2>&1"
|
||||
if ssh "${SSH_OPTS[@]}" "${PROXMOX_USER}@${PROXMOX_HOST}" "$exists_cmd" >/dev/null 2>&1; then
|
||||
log "CT ${VMID} already exists on ${PROXMOX_HOST}"
|
||||
return
|
||||
fi
|
||||
[[ "$SKIP_CREATE" == "0" ]] || fail "CT ${VMID} does not exist and --skip-create was set"
|
||||
local create_cmd="pct create ${VMID} local:vztmpl/${CT_TEMPLATE} --storage ${CT_STORAGE} --hostname ${CT_HOSTNAME} --memory ${CT_MEMORY_MB} --cores ${CT_CORES} --rootfs ${CT_STORAGE}:${CT_ROOTFS_GB} --net0 name=eth0,bridge=vmbr0,tag=${CT_VLAN_TAG},ip=${CT_IP}/${CT_PREFIX},gw=${CT_GW},type=veth --unprivileged 1 --swap ${CT_SWAP_MB} --onboot 1 --timezone ${CT_TIMEZONE} --features nesting=1,keyctl=1"
|
||||
log "Creating CT ${VMID} (${CT_HOSTNAME})"
|
||||
run_remote "$create_cmd"
|
||||
}
|
||||
|
||||
ensure_ct_running() {
|
||||
local status
|
||||
status="$(ssh "${SSH_OPTS[@]}" "${PROXMOX_USER}@${PROXMOX_HOST}" "pct status ${VMID} 2>/dev/null | awk '{print \$2}'" || true)"
|
||||
if [[ "$status" != "running" ]]; then
|
||||
log "Starting CT ${VMID}"
|
||||
run_remote "pct start ${VMID}"
|
||||
if [[ "$DRY_RUN" == "0" ]]; then
|
||||
sleep 10
|
||||
fi
|
||||
else
|
||||
log "CT ${VMID} already running"
|
||||
fi
|
||||
}
|
||||
|
||||
push_archive() {
|
||||
log "Uploading deploy archive to ${PROXMOX_HOST}"
|
||||
run_local scp "${SSH_OPTS[@]}" "$TMP_ARCHIVE" "${PROXMOX_USER}@${PROXMOX_HOST}:${REMOTE_ARCHIVE}"
|
||||
run_remote "pct push ${VMID} ${REMOTE_ARCHIVE} ${PUSH_ARCHIVE}"
|
||||
run_remote "rm -f ${REMOTE_ARCHIVE}"
|
||||
}
|
||||
|
||||
run_ct_script() {
|
||||
local body
|
||||
body="$(cat <<'INNER'
|
||||
set -euo pipefail
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
DEPLOY_ROOT='__DEPLOY_ROOT__'
|
||||
POSTGRES_DB='__POSTGRES_DB__'
|
||||
POSTGRES_USER='__POSTGRES_USER__'
|
||||
POSTGRES_PASSWORD='__POSTGRES_PASSWORD__'
|
||||
REDIS_URL='__REDIS_URL__'
|
||||
ORCH_PORT='__ORCH_PORT__'
|
||||
WEB_PORT='__WEB_PORT__'
|
||||
NEXTAUTH_URL='__NEXTAUTH_URL__'
|
||||
NEXT_PUBLIC_ORCH_URL='__NEXT_PUBLIC_ORCH_URL__'
|
||||
SESSION_SECRET='__SESSION_SECRET__'
|
||||
JWT_SECRET='__JWT_SECRET__'
|
||||
API_KEYS='__API_KEYS__'
|
||||
CT_NODE_ENV='__CT_NODE_ENV__'
|
||||
PUSH_ARCHIVE='__PUSH_ARCHIVE__'
|
||||
SKIP_SYSTEM_PACKAGES='__SKIP_SYSTEM_PACKAGES__'
|
||||
|
||||
if [[ "$SKIP_SYSTEM_PACKAGES" != "1" ]]; then
|
||||
apt-get update -qq
|
||||
apt-get install -y -qq ca-certificates curl gnupg git rsync build-essential postgresql redis-server
|
||||
if ! command -v node >/dev/null 2>&1 || [[ "$(node -v 2>/dev/null || true)" != v20* ]]; then
|
||||
curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
|
||||
apt-get install -y -qq nodejs
|
||||
fi
|
||||
fi
|
||||
|
||||
systemctl enable postgresql redis-server >/dev/null 2>&1 || true
|
||||
systemctl restart postgresql redis-server
|
||||
|
||||
install -d -m 0755 "$DEPLOY_ROOT"
|
||||
rm -rf /tmp/currencicombo-incoming
|
||||
mkdir -p /tmp/currencicombo-incoming
|
||||
tar -xzf "$PUSH_ARCHIVE" -C /tmp/currencicombo-incoming
|
||||
rsync -a --delete /tmp/currencicombo-incoming/ "$DEPLOY_ROOT/"
|
||||
rm -rf /tmp/currencicombo-incoming "$PUSH_ARCHIVE"
|
||||
|
||||
cat > "$DEPLOY_ROOT/orchestrator/.env" <<ENV
|
||||
PORT=$ORCH_PORT
|
||||
NODE_ENV=$CT_NODE_ENV
|
||||
LOG_LEVEL=info
|
||||
API_KEYS=$API_KEYS
|
||||
SESSION_SECRET=$SESSION_SECRET
|
||||
JWT_SECRET=$JWT_SECRET
|
||||
ALLOWED_IPS=127.0.0.1,::1
|
||||
RUN_MIGRATIONS=false
|
||||
DATABASE_URL=postgresql://$POSTGRES_USER:$POSTGRES_PASSWORD@127.0.0.1:5432/$POSTGRES_DB
|
||||
REDIS_URL=$REDIS_URL
|
||||
ENV
|
||||
|
||||
cat > "$DEPLOY_ROOT/webapp/.env.local" <<ENV
|
||||
NEXTAUTH_URL=$NEXTAUTH_URL
|
||||
NEXTAUTH_SECRET=$SESSION_SECRET
|
||||
NEXT_PUBLIC_ORCH_URL=$NEXT_PUBLIC_ORCH_URL
|
||||
ENV
|
||||
|
||||
runuser -u postgres -- psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='${POSTGRES_USER}'" | grep -q 1 || \
|
||||
runuser -u postgres -- psql -c "CREATE USER ${POSTGRES_USER} WITH PASSWORD '${POSTGRES_PASSWORD}';"
|
||||
runuser -u postgres -- psql -tAc "SELECT 1 FROM pg_database WHERE datname='${POSTGRES_DB}'" | grep -q 1 || \
|
||||
runuser -u postgres -- psql -c "CREATE DATABASE ${POSTGRES_DB} OWNER ${POSTGRES_USER};"
|
||||
runuser -u postgres -- psql -d "$POSTGRES_DB" -c "GRANT ALL ON SCHEMA public TO ${POSTGRES_USER};" >/dev/null
|
||||
|
||||
cd "$DEPLOY_ROOT/orchestrator"
|
||||
npm ci
|
||||
RUN_MIGRATIONS=true npm run migrate
|
||||
npm run build
|
||||
|
||||
cd "$DEPLOY_ROOT/webapp"
|
||||
npm ci
|
||||
npm run build
|
||||
|
||||
cat > /etc/systemd/system/currencicombo-orchestrator.service <<UNIT
|
||||
[Unit]
|
||||
Description=CurrenciCombo Orchestrator
|
||||
After=network-online.target postgresql.service redis-server.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=$DEPLOY_ROOT/orchestrator
|
||||
Environment=NODE_ENV=$CT_NODE_ENV
|
||||
Environment=PORT=$ORCH_PORT
|
||||
ExecStart=/usr/bin/npm start
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
UNIT
|
||||
|
||||
cat > /etc/systemd/system/currencicombo-webapp.service <<UNIT
|
||||
[Unit]
|
||||
Description=CurrenciCombo Webapp
|
||||
After=network-online.target currencicombo-orchestrator.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=$DEPLOY_ROOT/webapp
|
||||
Environment=NODE_ENV=$CT_NODE_ENV
|
||||
Environment=PORT=$WEB_PORT
|
||||
ExecStart=/usr/bin/npm start
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
UNIT
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl enable currencicombo-orchestrator currencicombo-webapp >/dev/null
|
||||
systemctl restart currencicombo-orchestrator currencicombo-webapp
|
||||
sleep 8
|
||||
curl -fsS "http://127.0.0.1:${ORCH_PORT}/health" >/dev/null
|
||||
curl -fsS "http://127.0.0.1:${WEB_PORT}/" >/dev/null
|
||||
INNER
|
||||
)"
|
||||
body="${body//__DEPLOY_ROOT__/$DEPLOY_ROOT}"
|
||||
body="${body//__POSTGRES_DB__/$POSTGRES_DB}"
|
||||
body="${body//__POSTGRES_USER__/$POSTGRES_USER}"
|
||||
body="${body//__POSTGRES_PASSWORD__/$POSTGRES_PASSWORD}"
|
||||
body="${body//__REDIS_URL__/$REDIS_URL}"
|
||||
body="${body//__ORCH_PORT__/$ORCH_PORT}"
|
||||
body="${body//__WEB_PORT__/$WEB_PORT}"
|
||||
body="${body//__NEXTAUTH_URL__/$NEXTAUTH_URL}"
|
||||
body="${body//__NEXT_PUBLIC_ORCH_URL__/$NEXT_PUBLIC_ORCH_URL}"
|
||||
body="${body//__SESSION_SECRET__/$SESSION_SECRET}"
|
||||
body="${body//__JWT_SECRET__/$JWT_SECRET}"
|
||||
body="${body//__API_KEYS__/$API_KEYS}"
|
||||
body="${body//__CT_NODE_ENV__/$CT_NODE_ENV}"
|
||||
body="${body//__PUSH_ARCHIVE__/$PUSH_ARCHIVE}"
|
||||
body="${body//__SKIP_SYSTEM_PACKAGES__/$SKIP_SYSTEM_PACKAGES}"
|
||||
|
||||
if [[ "$DRY_RUN" == "1" ]]; then
|
||||
echo "[dry-run] pct exec ${VMID} -- bash -lc '<currencicombo bootstrap script>'"
|
||||
else
|
||||
ssh "${SSH_OPTS[@]}" "${PROXMOX_USER}@${PROXMOX_HOST}" "pct exec ${VMID} -- bash -lc $(printf '%q' "$body")"
|
||||
fi
|
||||
}
|
||||
|
||||
verify_from_host() {
|
||||
local cmd="pct exec ${VMID} -- bash -lc 'curl -fsS http://127.0.0.1:${ORCH_PORT}/health >/dev/null && curl -fsS http://127.0.0.1:${WEB_PORT}/ >/dev/null && systemctl is-active currencicombo-orchestrator currencicombo-webapp'"
|
||||
run_remote "$cmd"
|
||||
}
|
||||
|
||||
ensure_archive
|
||||
if [[ "$DRY_RUN" == "0" ]]; then
|
||||
[[ -s "$TMP_ARCHIVE" ]] || fail "Failed to create deploy archive"
|
||||
fi
|
||||
ensure_ct
|
||||
ensure_ct_running
|
||||
push_archive
|
||||
run_ct_script
|
||||
verify_from_host
|
||||
|
||||
log "CurrenciCombo deploy complete: CT ${VMID} on ${PROXMOX_HOST} (${CT_IP})"
|
||||
log "Web: http://${CT_IP}:${WEB_PORT}/"
|
||||
log "API: http://${CT_IP}:${ORCH_PORT}/health"
|
||||
@@ -42,7 +42,8 @@ if [[ -z "$DEPLOYER" && -n "${PRIVATE_KEY:-}" ]] && command -v cast &>/dev/null;
|
||||
fi
|
||||
DEPLOYER="${DEPLOYER:-0x4A666F96fC8764181194447A7dFdb7d471b301C8}"
|
||||
|
||||
# Map chainId to RPC env var name (from check-balances-gas-and-deploy.sh)
|
||||
# Map chainId to RPC. Prefer env; Wemix/Alltra/Etherlink have documented public fallbacks
|
||||
# for read-only balance checks (overrides with WEMIX_RPC_URL / CHAIN_651940_RPC_URL / ETHERLINK_RPC_URL).
|
||||
get_rpc_for_chain() {
|
||||
local c="$1"
|
||||
case "$c" in
|
||||
@@ -57,8 +58,9 @@ get_rpc_for_chain() {
|
||||
100) echo "${GNOSIS_MAINNET_RPC:-${GNOSIS_RPC:-}}" ;;
|
||||
25) echo "${CRONOS_RPC_URL:-}" ;;
|
||||
42220) echo "${CELO_RPC_URL:-}" ;;
|
||||
1111) echo "${WEMIX_RPC_URL:-}" ;;
|
||||
651940) echo "${ALLTRA_MAINNET_RPC:-}" ;;
|
||||
1111) echo "${WEMIX_RPC_URL:-${WEMIX_RPC:-https://api.wemix.com}}" ;;
|
||||
651940) echo "${ALLTRA_MAINNET_RPC:-${CHAIN_651940_RPC_URL:-${CHAIN_651940_RPC:-${ALL_MAINNET_RPC:-https://mainnet-rpc.alltra.global}}}}" ;;
|
||||
42793) echo "${ETHERLINK_RPC_URL:-${RPC_URL_42793:-https://node.mainnet.etherlink.com}}" ;;
|
||||
*) echo "" ;;
|
||||
esac
|
||||
}
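# Read-only sanity check of a public fallback RPC (sketch; assumes Foundry's `cast` is installed):
#   cast balance 0x4A666F96fC8764181194447A7dFdb7d471b301C8 --rpc-url https://node.mainnet.etherlink.com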
89
scripts/deployment/gitea-cloudflare-sync.sh
Executable file
@@ -0,0 +1,89 @@
#!/usr/bin/env bash
|
||||
# Run from Phoenix deploy API (child of gitea Cloudflare job) on the host that holds
|
||||
# PHOENIX_REPO_ROOT + .env — not from Gitea cloud runners (wrong public IP / no .env).
|
||||
#
|
||||
# Gates:
|
||||
# PHOENIX_CLOUDFLARE_SYNC=1|true — must be set in repo .env and/or phoenix systemd (safety)
|
||||
# Path filter: only commits touching DNS/CF-related paths (unless target is cloudflare-sync-force)
|
||||
# Env (optional):
|
||||
# CLOUDFLARE_GITEA_SYNC_ZONE — default d-bis.org (passed as --zone-only=… to update script)
|
||||
# PHOENIX_DEPLOY_TARGET — set by phoenix; cloudflare-sync-force skips path filter
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
ROOT="${PHOENIX_REPO_ROOT:-}"
|
||||
if [[ -z "$ROOT" ]]; then
|
||||
echo "gitea-cloudflare-sync: PHOENIX_REPO_ROOT is not set" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
UPDATE_SCRIPT="${ROOT}/scripts/update-all-dns-to-public-ip.sh"
|
||||
|
||||
if [[ ! -f "$UPDATE_SCRIPT" ]]; then
|
||||
echo "gitea-cloudflare-sync: missing ${UPDATE_SCRIPT}" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
if [[ -f "$ROOT/.env" ]]; then
|
||||
set +u
|
||||
# shellcheck source=/dev/null
|
||||
source "$ROOT/.env"
|
||||
set -u
|
||||
fi
|
||||
|
||||
target="${PHOENIX_DEPLOY_TARGET:-cloudflare-sync}"
|
||||
force_sync=0
|
||||
if [[ "$target" == "cloudflare-sync-force" ]] || [[ "${PHOENIX_FORCE_CLOUDFLARE_SYNC:-0}" == "1" ]]; then
|
||||
force_sync=1
|
||||
fi
|
||||
|
||||
sync_on="${PHOENIX_CLOUDFLARE_SYNC:-0}"
|
||||
if [[ "$sync_on" != "1" && "${sync_on,,}" != "true" ]]; then
|
||||
echo "gitea-cloudflare-sync: skip (set PHOENIX_CLOUDFLARE_SYNC=1 in .env or phoenix environment)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ -z "${PHOENIX_DEPLOY_SHA:-}" && "$force_sync" -eq 0 ]]; then
|
||||
echo "gitea-cloudflare-sync: skip (no PHOENIX_DEPLOY_SHA; use target cloudflare-sync-force to run anyway)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
matches_cloudflare_paths() {
|
||||
local sha="$1"
|
||||
local out
|
||||
if ! git -C "$ROOT" rev-parse --verify "${sha}^{commit}" >/dev/null 2>&1; then
|
||||
echo "gitea-cloudflare-sync: skip (commit ${sha:0:12}… not in ${ROOT} — git pull there or use cloudflare-sync-force)" >&2
|
||||
return 1
|
||||
fi
|
||||
if ! out=$(git -C "$ROOT" show --name-only --format="" "$sha" 2>/dev/null); then
|
||||
echo "gitea-cloudflare-sync: skip (git show ${sha:0:12}… failed — use cloudflare-sync-force)" >&2
|
||||
return 1
|
||||
fi
|
||||
if [[ -z "$out" ]]; then
|
||||
return 1
|
||||
fi
|
||||
while IFS= read -r f; do
|
||||
[[ -z "$f" ]] && continue
|
||||
if [[ "$f" == scripts/cloudflare/* ||
|
||||
"$f" == scripts/deployment/gitea-cloudflare-sync.sh ||
|
||||
"$f" == scripts/update-all-dns-to-public-ip.sh ||
|
||||
"$f" == phoenix-deploy-api/deploy-targets.json ||
|
||||
"$f" == config/ip-addresses.conf ]]; then
|
||||
return 0
|
||||
fi
|
||||
done <<<"$out"
|
||||
return 1
|
||||
}
|
||||
|
||||
if [[ "$force_sync" -eq 0 ]]; then
|
||||
if ! matches_cloudflare_paths "$PHOENIX_DEPLOY_SHA"; then
|
||||
echo "gitea-cloudflare-sync: no Cloudflare/DNS-related files in ${PHOENIX_DEPLOY_SHA:0:12}…; skip (use cloudflare-sync-force to override)"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
zone="${CLOUDFLARE_GITEA_SYNC_ZONE:-d-bis.org}"
|
||||
echo "gitea-cloudflare-sync: applying DNS sync for zone ${zone} (force=${force_sync})"
|
||||
|
||||
exec bash "$UPDATE_SCRIPT" "--zone-only=${zone}"
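# Example (sketch): force a one-off sync that bypasses the path filter, using the gates
# documented in the header above (repo path is a placeholder):
#   PHOENIX_CLOUDFLARE_SYNC=1 PHOENIX_REPO_ROOT=/path/to/repo \
#     PHOENIX_DEPLOY_TARGET=cloudflare-sync-force ./scripts/deployment/gitea-cloudflare-sync.sh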
244
scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh
Executable file
@@ -0,0 +1,244 @@
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
source "$PROJECT_ROOT/scripts/lib/load-project-env.sh"
|
||||
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
|
||||
|
||||
PHOENIX_DEPLOY_WORKSPACE="${PHOENIX_DEPLOY_WORKSPACE:-}"
|
||||
PROXMOX_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
|
||||
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-root}"
|
||||
VMID="${CURRENCICOMBO_PHOENIX_VMID:-8604}"
|
||||
CT_IP="${IP_CURRENCICOMBO_PHOENIX:-10.160.0.14}"
|
||||
CT_REPO_DIR="${CT_REPO_DIR:-/var/lib/currencicombo/repo}"
|
||||
PUBLIC_URL="${PUBLIC_URL:-https://curucombo.xn--vov0g.com}"
|
||||
PUBLIC_DOMAIN="${PUBLIC_DOMAIN:-curucombo.xn--vov0g.com}"
|
||||
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS:-192.168.11.167}:81}"
|
||||
NPM_EMAIL="${NPM_EMAIL:-}"
|
||||
NPM_PASSWORD="${NPM_PASSWORD:-}"
|
||||
DRY_RUN=0
|
||||
|
||||
usage() {
|
||||
cat <<'USAGE'
|
||||
Usage: phoenix-deploy-currencicombo-from-workspace.sh [--dry-run]
|
||||
|
||||
Requires:
|
||||
PHOENIX_DEPLOY_WORKSPACE Full staged CurrenciCombo checkout prepared by phoenix-deploy-api
|
||||
|
||||
This script:
|
||||
1. Packs the staged repo workspace.
|
||||
2. Pushes it into CT 8604 on r630-01.
|
||||
3. Ensures host prerequisites are present, then runs install.sh, the prune cron installer, and the deploy script inside the CT.
|
||||
4. Updates the public NPMplus host so /api/* preserves the full path and supports SSE.
|
||||
5. Verifies the public portal + /api/ready end to end.
|
||||
USAGE
|
||||
}
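# Example (sketch): phoenix-deploy-api normally exports PHOENIX_DEPLOY_WORKSPACE; a manual
# preview would look like this (the workspace path is a placeholder):
#   PHOENIX_DEPLOY_WORKSPACE=/tmp/currencicombo-staged \
#     ./scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh --dry-run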
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--dry-run) DRY_RUN=1; shift ;;
|
||||
-h|--help) usage; exit 0 ;;
|
||||
*) echo "unknown arg: $1" >&2; usage; exit 2 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
log() { printf '[currencicombo-phoenix] %s\n' "$*" >&2; }
|
||||
die() { printf '[currencicombo-phoenix][FATAL] %s\n' "$*" >&2; exit 1; }
|
||||
run() { if [[ "$DRY_RUN" -eq 1 ]]; then printf '[dry-run] %s\n' "$*" >&2; else eval "$*"; fi; }
|
||||
need_cmd() { command -v "$1" >/dev/null 2>&1 || die "missing required command: $1"; }
|
||||
|
||||
for cmd in ssh scp tar curl jq mktemp; do
|
||||
need_cmd "$cmd"
|
||||
done
|
||||
|
||||
[[ -n "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "PHOENIX_DEPLOY_WORKSPACE is required"
|
||||
[[ -d "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "staged workspace missing: $PHOENIX_DEPLOY_WORKSPACE"
|
||||
|
||||
if [[ "$DRY_RUN" -eq 0 ]]; then
|
||||
[[ -n "$NPM_EMAIL" ]] || die "NPM_EMAIL is required"
|
||||
[[ -n "$NPM_PASSWORD" ]] || die "NPM_PASSWORD is required"
|
||||
fi
|
||||
|
||||
SSH_TARGET="${PROXMOX_SSH_USER}@${PROXMOX_HOST}"
|
||||
SSH_OPTS=(-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)
|
||||
TMP_DIR="$(mktemp -d /tmp/currencicombo-phoenix-XXXXXX)"
|
||||
ARCHIVE_PATH="${TMP_DIR}/currencicombo-workspace.tgz"
|
||||
REMOTE_ARCHIVE="/tmp/$(basename "$ARCHIVE_PATH")"
|
||||
CT_ARCHIVE="/root/$(basename "$ARCHIVE_PATH")"
|
||||
NPM_COOKIE_JAR="${TMP_DIR}/npm-cookies.txt"
|
||||
cleanup() {
|
||||
rm -rf "$TMP_DIR"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
ssh_remote() {
|
||||
local cmd="$1"
|
||||
if [[ "$DRY_RUN" -eq 1 ]]; then
|
||||
printf '[dry-run] ssh %q %q\n' "$SSH_TARGET" "$cmd" >&2
|
||||
else
|
||||
ssh "${SSH_OPTS[@]}" "$SSH_TARGET" "$cmd"
|
||||
fi
|
||||
}
|
||||
|
||||
pct_exec_script() {
|
||||
local local_script="$1"
|
||||
local remote_script
|
||||
local ct_script
|
||||
remote_script="/tmp/$(basename "$local_script")"
|
||||
ct_script="/root/$(basename "$local_script")"
|
||||
run "scp ${SSH_OPTS[*]} '$local_script' '${SSH_TARGET}:${remote_script}'"
|
||||
ssh_remote "pct push ${VMID} '${remote_script}' '${ct_script}' --perms 0755 && rm -f '${remote_script}' && pct exec ${VMID} -- bash '${ct_script}' && pct exec ${VMID} -- rm -f '${ct_script}'"
|
||||
}
|
||||
|
||||
log "packing staged workspace from ${PHOENIX_DEPLOY_WORKSPACE}"
|
||||
run "tar -C '$PHOENIX_DEPLOY_WORKSPACE' --exclude='.git' --exclude='node_modules' --exclude='dist' --exclude='orchestrator/node_modules' --exclude='orchestrator/dist' -czf '$ARCHIVE_PATH' ."
|
||||
|
||||
log "ensuring CT ${VMID} is running on ${PROXMOX_HOST}"
|
||||
ssh_remote "pct start ${VMID} >/dev/null 2>&1 || true"
|
||||
|
||||
log "uploading staged archive to CT ${VMID}"
|
||||
run "scp ${SSH_OPTS[*]} '$ARCHIVE_PATH' '${SSH_TARGET}:${REMOTE_ARCHIVE}'"
|
||||
ssh_remote "pct push ${VMID} '${REMOTE_ARCHIVE}' '${CT_ARCHIVE}' && rm -f '${REMOTE_ARCHIVE}'"
|
||||
|
||||
CT_SCRIPT="${TMP_DIR}/currencicombo-ct-deploy.sh"
|
||||
cat > "$CT_SCRIPT" <<'EOF'
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
ARCHIVE_PATH="__CT_ARCHIVE__"
|
||||
REPO_DIR="__CT_REPO_DIR__"
|
||||
|
||||
need_pkg() {
|
||||
dpkg -s "$1" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
apt-get update -qq
|
||||
for pkg in ca-certificates curl git jq postgresql redis-server rsync build-essential; do
|
||||
need_pkg "$pkg" || apt-get install -y -qq "$pkg"
|
||||
done
|
||||
|
||||
if ! command -v node >/dev/null 2>&1 || ! node -v 2>/dev/null | grep -q '^v20\.'; then
|
||||
curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
|
||||
apt-get install -y -qq nodejs
|
||||
fi
|
||||
|
||||
systemctl enable --now postgresql >/dev/null 2>&1 || true
|
||||
systemctl enable --now redis-server >/dev/null 2>&1 || true
|
||||
|
||||
if [[ ! -f /root/currencicombo-prephoenix-archive.tgz && -d /opt/currencicombo ]]; then
|
||||
tar -czf /root/currencicombo-prephoenix-archive.tgz /opt/currencicombo /etc/currencicombo 2>/dev/null || true
|
||||
fi
|
||||
|
||||
install -d -o root -g root -m 0755 "$(dirname "$REPO_DIR")"
|
||||
rm -rf "$REPO_DIR"
|
||||
mkdir -p "$REPO_DIR"
|
||||
tar -xzf "$ARCHIVE_PATH" -C "$REPO_DIR"
|
||||
rm -f "$ARCHIVE_PATH"
|
||||
|
||||
bash "$REPO_DIR/scripts/deployment/install.sh"
|
||||
bash "$REPO_DIR/scripts/deployment/install-prune-cron.sh"
|
||||
CC_GIT_REF=local bash "$REPO_DIR/scripts/deployment/deploy-currencicombo-8604.sh"
|
||||
systemctl is-active currencicombo-orchestrator.service currencicombo-webapp.service
|
||||
curl -fsS http://127.0.0.1:8080/ready
|
||||
curl -fsS http://127.0.0.1:3000/ >/dev/null
|
||||
EOF
|
||||
perl -0pi -e "s|__CT_ARCHIVE__|${CT_ARCHIVE//|/\\|}|g; s|__CT_REPO_DIR__|${CT_REPO_DIR//|/\\|}|g" "$CT_SCRIPT"
|
||||
|
||||
log "running install + deploy inside CT ${VMID}"
|
||||
pct_exec_script "$CT_SCRIPT"
|
||||
|
||||
if [[ "$DRY_RUN" -eq 0 ]]; then
|
||||
log "updating NPMplus proxy host for ${PUBLIC_DOMAIN}"
|
||||
AUTH_JSON="$(jq -nc --arg identity "$NPM_EMAIL" --arg secret "$NPM_PASSWORD" '{identity:$identity,secret:$secret}')"
|
||||
TOKEN_RESPONSE="$(curl -sk -X POST "$NPM_URL/api/tokens" -H 'Content-Type: application/json' -d "$AUTH_JSON" -c "$NPM_COOKIE_JAR")"
|
||||
TOKEN="$(echo "$TOKEN_RESPONSE" | jq -r '.token // .accessToken // .access_token // .data.token // empty' 2>/dev/null)"
|
||||
USE_COOKIE_AUTH=0
|
||||
if [[ -z "$TOKEN" || "$TOKEN" == "null" ]]; then
|
||||
if echo "$TOKEN_RESPONSE" | jq -e '.expires' >/dev/null 2>&1; then
|
||||
USE_COOKIE_AUTH=1
|
||||
else
|
||||
die "NPMplus authentication failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
npm_api() {
|
||||
if [[ "$USE_COOKIE_AUTH" -eq 1 ]]; then
|
||||
curl -sk -b "$NPM_COOKIE_JAR" "$@"
|
||||
else
|
||||
curl -sk -H "Authorization: Bearer $TOKEN" "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
HOSTS_JSON="$(npm_api -X GET "$NPM_URL/api/nginx/proxy-hosts")"
|
||||
HOST_ID="$(echo "$HOSTS_JSON" | jq -r --arg domain "$PUBLIC_DOMAIN" '
|
||||
(if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
|
||||
| map(select(.domain_names | type == "array"))
|
||||
| map(select(any(.domain_names[]; . == $domain)))
|
||||
| .[0].id // empty
|
||||
')"
|
||||
[[ -n "$HOST_ID" ]] || die "NPMplus proxy host not found for ${PUBLIC_DOMAIN}"
|
||||
|
||||
ADVANCED_CONFIG="$(cat <<CFG
|
||||
location ^~ /api/ {
|
||||
proxy_pass http://${CT_IP}:8080;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Host \$host;
|
||||
proxy_set_header X-Real-IP \$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
proxy_set_header Connection \"\";
|
||||
proxy_buffering off;
|
||||
proxy_cache off;
|
||||
proxy_read_timeout 24h;
|
||||
proxy_send_timeout 24h;
|
||||
add_header Cache-Control \"no-cache\";
|
||||
}
|
||||
CFG
|
||||
)"
|
||||
|
||||
PAYLOAD="$(echo "$HOSTS_JSON" | jq -c --arg domain "$PUBLIC_DOMAIN" --arg host "$CT_IP" --arg advanced "$ADVANCED_CONFIG" '
|
||||
(if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
|
||||
| map(select(.domain_names | type == "array"))
|
||||
| map(select(any(.domain_names[]; . == $domain)))
|
||||
| .[0]
|
||||
| {
|
||||
domain_names,
|
||||
forward_scheme: (.forward_scheme // "http"),
|
||||
forward_host: $host,
|
||||
forward_port: 3000,
|
||||
access_list_id,
|
||||
certificate_id,
|
||||
ssl_forced,
|
||||
caching_enabled,
|
||||
block_exploits,
|
||||
advanced_config: $advanced,
|
||||
allow_websocket_upgrade,
|
||||
http2_support,
|
||||
hsts_enabled,
|
||||
hsts_subdomains,
|
||||
enabled
|
||||
}
|
||||
')"
|
||||
[[ -n "$PAYLOAD" && "$PAYLOAD" != "null" ]] || die "failed to build NPMplus update payload"
|
||||
UPDATE_RESPONSE="$(npm_api -X PUT "$NPM_URL/api/nginx/proxy-hosts/${HOST_ID}" -H 'Content-Type: application/json' -d "$PAYLOAD")"
|
||||
echo "$UPDATE_RESPONSE" | jq -e '.id != null' >/dev/null 2>&1 || die "NPMplus proxy host update failed"
|
||||
|
||||
log "running public smoke checks"
|
||||
HEADERS="$(curl -skI "$PUBLIC_URL/")"
|
||||
echo "$HEADERS" | grep -q '^HTTP/2 200' || die "public root is not HTTP 200"
|
||||
if echo "$HEADERS" | grep -qi '^x-nextjs-prerender:'; then
|
||||
die "old Next.js headers still present on public root"
|
||||
fi
|
||||
|
||||
curl -sk "$PUBLIC_URL/" | grep -F '<title>Solace Bank Group PLC — Treasury Management Portal</title>' >/dev/null || die "public title mismatch"
|
||||
READY_BODY="$(curl -sk "$PUBLIC_URL/api/ready")"
|
||||
echo "$READY_BODY" | grep -F '"ready":true' >/dev/null || die "public /api/ready failed"
|
||||
curl -skN --max-time 5 -H 'Accept: text/event-stream' "$PUBLIC_URL/api/plans/demo-pay-014/status/stream" | grep -F '"type":"connected"' >/dev/null || die "public SSE smoke failed"
|
||||
|
||||
log "capturing EXT-* blocker summary"
|
||||
ssh_remote "pct exec ${VMID} -- journalctl -u currencicombo-orchestrator.service -n 200 --no-pager | grep -E 'ExternalBlockers|EXT-' || true"
|
||||
fi
|
||||
|
||||
log "CurrenciCombo Phoenix deploy completed from ${PHOENIX_DEPLOY_WORKSPACE}"
|
||||
@@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env bash
|
||||
# Deploy explorer-monorepo live site from the Gitea-staged Phoenix workspace.
|
||||
#
|
||||
# phoenix-deploy-api stages non-Proxmox repos in PHOENIX_DEPLOY_WORKSPACE. The
|
||||
# explorer deploy scripts expect the repo to live as PHOENIX_REPO_ROOT/explorer-monorepo
|
||||
# because the API deploy bundles Proxmox reference docs. This wrapper syncs the staged
|
||||
# tree into that layout, then runs the canonical live deploy scripts.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
die() {
|
||||
echo "ERROR: $*" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
PHOENIX_REPO_ROOT="${PHOENIX_REPO_ROOT:-}"
|
||||
PHOENIX_DEPLOY_WORKSPACE="${PHOENIX_DEPLOY_WORKSPACE:-}"
|
||||
EXPLORER_REPO_DIR="${EXPLORER_REPO_DIR:-${PHOENIX_REPO_ROOT}/explorer-monorepo}"
|
||||
|
||||
[[ -n "$PHOENIX_REPO_ROOT" ]] || die "PHOENIX_REPO_ROOT is required"
|
||||
[[ -d "$PHOENIX_REPO_ROOT" ]] || die "PHOENIX_REPO_ROOT does not exist: $PHOENIX_REPO_ROOT"
|
||||
[[ -n "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "PHOENIX_DEPLOY_WORKSPACE is required"
|
||||
[[ -d "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "staged workspace missing: $PHOENIX_DEPLOY_WORKSPACE"
|
||||
[[ "$EXPLORER_REPO_DIR" != "/" ]] || die "refusing to sync into /"
|
||||
|
||||
echo "Syncing explorer workspace:"
|
||||
echo " from: $PHOENIX_DEPLOY_WORKSPACE"
|
||||
echo " to: $EXPLORER_REPO_DIR"
|
||||
|
||||
mkdir -p "$EXPLORER_REPO_DIR"
|
||||
rsync -a --delete \
|
||||
--exclude '.git/' \
|
||||
--exclude 'node_modules/' \
|
||||
--exclude 'frontend/node_modules/' \
|
||||
--exclude 'frontend/.next/' \
|
||||
--exclude 'backend/bin/' \
|
||||
--exclude 'test-results/' \
|
||||
"$PHOENIX_DEPLOY_WORKSPACE"/ "$EXPLORER_REPO_DIR"/
|
||||
|
||||
cd "$EXPLORER_REPO_DIR"
|
||||
|
||||
FRONTEND_SCRIPT="$EXPLORER_REPO_DIR/scripts/deploy-next-frontend-to-vmid5000.sh"
|
||||
if [[ -f "$FRONTEND_SCRIPT" ]] && ! grep -q 'FORCE_REMOTE_PCT' "$FRONTEND_SCRIPT"; then
|
||||
python3 - "$FRONTEND_SCRIPT" <<'PY'
|
||||
import pathlib
|
||||
import sys
|
||||
|
||||
path = pathlib.Path(sys.argv[1])
|
||||
text = path.read_text()
|
||||
text = text.replace(
|
||||
'FRONTEND_PORT="${FRONTEND_PORT:-3000}"\n',
|
||||
'FRONTEND_PORT="${FRONTEND_PORT:-3000}"\nFORCE_REMOTE_PCT="${FORCE_REMOTE_PCT:-0}"\n',
|
||||
1,
|
||||
)
|
||||
text = text.replace(
|
||||
'if [[ -f /proc/1/cgroup ]] && grep -q "lxc" /proc/1/cgroup 2>/dev/null; then',
|
||||
'if [[ "$FORCE_REMOTE_PCT" != "1" ]] && [[ -f /proc/1/cgroup ]] && grep -q "lxc" /proc/1/cgroup 2>/dev/null; then',
|
||||
)
|
||||
path.write_text(text)
|
||||
PY
|
||||
fi
|
||||
|
||||
export PROXMOX_HOST=192.168.11.12
|
||||
export PROXMOX_HOST_R630_02=192.168.11.12
|
||||
export EXEC_MODE=pct
|
||||
export FORCE_REMOTE_PCT=1
|
||||
|
||||
if [[ ! -x "$EXPLORER_REPO_DIR/frontend/node_modules/.bin/next" ]]; then
|
||||
echo "Installing frontend dependencies with npm ci"
|
||||
(
|
||||
cd "$EXPLORER_REPO_DIR/frontend"
|
||||
npm ci
|
||||
)
|
||||
fi
|
||||
|
||||
echo "Deploying static explorer config assets"
|
||||
bash scripts/deploy-explorer-config-to-vmid5000.sh
|
||||
|
||||
echo "Deploying explorer config/API backend"
|
||||
bash scripts/deploy-explorer-ai-to-vmid5000.sh
|
||||
|
||||
echo "Deploying Next frontend"
|
||||
bash scripts/deploy-next-frontend-to-vmid5000.sh
|
||||
|
||||
echo "Explorer live deployment complete."
|
||||
@@ -19,45 +19,92 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="$PROJECT_ROOT/smom-dbis-138"

# shellcheck source=./scripts/lib/run-summary.sh
source "$PROJECT_ROOT/scripts/lib/run-summary.sh"

RUN_STARTED_AT="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
SECONDS=0

format_duration() {
  local total="$1"
  printf '%02dm:%02ds' "$((total / 60))" "$((total % 60))"
}

DRY_RUN=""
SKIP_MIRROR=""
SKIP_MESH=""
MESH_ONLY=""
SKIP_REGISTER_GRU=""
SKIP_VERIFY=""
JSON_OUT=""
for a in "$@"; do
  [[ "$a" == "--dry-run" ]] && DRY_RUN=1
  [[ "$a" == "--skip-mirror" ]] && SKIP_MIRROR=1
  [[ "$a" == "--skip-mesh" ]] && SKIP_MESH=1
  [[ "$a" == "--legacy-pools-only" ]] && SKIP_MESH=1
  [[ "$a" == "--mesh-only" ]] && MESH_ONLY=1 && SKIP_MIRROR=1
  [[ "$a" == "--skip-register-gru" ]] && SKIP_REGISTER_GRU=1
  [[ "$a" == "--skip-verify" ]] && SKIP_VERIFY=1
  case "$a" in
    --dry-run) DRY_RUN=1 ;;
    --skip-mirror) SKIP_MIRROR=1 ;;
    --skip-mesh|--legacy-pools-only) SKIP_MESH=1 ;;
    --mesh-only) MESH_ONLY=1; SKIP_MIRROR=1 ;;
    --skip-register-gru) SKIP_REGISTER_GRU=1 ;;
    --skip-verify) SKIP_VERIFY=1 ;;
    --json-out)
      JSON_OUT_NEXT=1
      ;;
    -h|--help)
      sed -n '1,16p' "$0"
      echo " --json-out PATH Write a machine-readable run summary JSON."
      exit 0
      ;;
    *)
      if [[ -n "${JSON_OUT_NEXT:-}" ]]; then
        JSON_OUT="$a"
        unset JSON_OUT_NEXT
      else
        echo "Unknown argument: $a" >&2
        exit 1
      fi
      ;;
  esac
done

if [[ -n "${JSON_OUT_NEXT:-}" ]]; then
  echo "Missing value for --json-out" >&2
  exit 1
fi

RUN_STATUS="failed"
run_summary_init "run-all-next-steps-chain138.sh" "$RUN_STARTED_AT" "$JSON_OUT"
trap 'run_summary_write "$RUN_STATUS" "$SECONDS" "$([[ -n "$DRY_RUN" ]] && echo dry-run || echo run)"; run_summary_cleanup' EXIT

echo "=== Chain 138 — run all next steps ==="
echo " started: $RUN_STARTED_AT"
echo " dry-run: $DRY_RUN skip-mirror: $SKIP_MIRROR skip-mesh: $SKIP_MESH mesh-only: $MESH_ONLY skip-register-gru: $SKIP_REGISTER_GRU skip-verify: $SKIP_VERIFY"
echo ""

# 1) Preflight
echo "--- Step 1: Preflight ---"
STEP_STARTED=$SECONDS
if [[ -n "$DRY_RUN" ]]; then
  echo "[DRY-RUN] $PROJECT_ROOT/scripts/deployment/preflight-chain138-deploy.sh"
else
  "$SCRIPT_DIR/preflight-chain138-deploy.sh" || { echo "Preflight failed." >&2; exit 1; }
fi
run_summary_record_step "1" "Preflight" "$([[ -n "$DRY_RUN" ]] && echo planned || echo success)" "$((SECONDS - STEP_STARTED))"
echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
echo ""

# 2) TransactionMirror + seed pool (legacy step; optional)
if [[ -z "$SKIP_MIRROR" ]]; then
  echo "--- Step 2: TransactionMirror + seed pool ---"
  STEP_STARTED=$SECONDS
  if [[ -n "$DRY_RUN" ]]; then
    echo "[DRY-RUN] $PROJECT_ROOT/scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh"
  else
    "$PROJECT_ROOT/scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh" || { echo "Deploy failed." >&2; exit 1; }
  fi
  run_summary_record_step "2" "TransactionMirror + seed pool" "$([[ -n "$DRY_RUN" ]] && echo planned || echo success)" "$((SECONDS - STEP_STARTED))"
  echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
  echo ""
else
  run_summary_record_step "2" "TransactionMirror + seed pool" "skipped" "0"
  echo "--- Step 2: TransactionMirror + seed pool (skipped) ---"
  echo ""
fi
@@ -65,13 +112,17 @@ fi
# 3) PMM full mesh (default on Chain 138)
if [[ -z "$SKIP_MESH" ]]; then
  echo "--- Step 3: PMM full mesh (Chain 138) ---"
  STEP_STARTED=$SECONDS
  if [[ -n "$DRY_RUN" ]]; then
    echo "[DRY-RUN] $PROJECT_ROOT/scripts/create-pmm-full-mesh-chain138.sh"
  else
    "$PROJECT_ROOT/scripts/create-pmm-full-mesh-chain138.sh" || { echo "PMM full mesh failed." >&2; exit 1; }
  fi
  run_summary_record_step "3" "PMM full mesh (Chain 138)" "$([[ -n "$DRY_RUN" ]] && echo planned || echo success)" "$((SECONDS - STEP_STARTED))"
  echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
  echo ""
else
  run_summary_record_step "3" "PMM full mesh (Chain 138)" "skipped" "0"
  echo "--- Step 3: PMM full mesh (skipped; legacy-only mode) ---"
  echo ""
fi
@@ -79,11 +130,15 @@ fi
# 4) Register c* as GRU (optional)
if [[ -z "$SKIP_REGISTER_GRU" ]]; then
  echo "--- Step 4: Register c* as GRU (UniversalAssetRegistry) ---"
  STEP_STARTED=$SECONDS
  if [[ -n "$DRY_RUN" ]]; then
    echo "[DRY-RUN] cd $SMOM && forge script script/deploy/RegisterGRUCompliantTokens.s.sol --rpc-url \$RPC_URL_138 --broadcast --private-key \$PRIVATE_KEY --with-gas-price 1000000000"
  else
    if [[ -f "$SMOM/.env" ]]; then
      set -a; source "$SMOM/.env"; set +a
      set -a
      # shellcheck source=/dev/null
      source "$SMOM/.env"
      set +a
      # Fallback: Register script expects CUSDT_ADDRESS_138/CUSDC_ADDRESS_138; use COMPLIANT_USDT/COMPLIANT_USDC if set
      [[ -z "${CUSDT_ADDRESS_138:-}" && -n "${COMPLIANT_USDT:-}" ]] && export CUSDT_ADDRESS_138="$COMPLIANT_USDT"
      [[ -z "${CUSDC_ADDRESS_138:-}" && -n "${COMPLIANT_USDC:-}" ]] && export CUSDC_ADDRESS_138="$COMPLIANT_USDC"
@@ -96,8 +151,11 @@ if [[ -z "$SKIP_REGISTER_GRU" ]]; then
      echo "Skip: $SMOM/.env not found."
    fi
  fi
  run_summary_record_step "4" "Register c* as GRU (UniversalAssetRegistry)" "$([[ -n "$DRY_RUN" ]] && echo planned || echo success)" "$((SECONDS - STEP_STARTED))"
  echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
  echo ""
else
  run_summary_record_step "4" "Register c* as GRU (UniversalAssetRegistry)" "skipped" "0"
  echo "--- Step 4: Register c* as GRU (skipped) ---"
  echo ""
fi
@@ -105,16 +163,27 @@ fi
# 5) Verify
if [[ -z "$SKIP_VERIFY" ]]; then
  echo "--- Step 5: On-chain verification ---"
  STEP_STARTED=$SECONDS
  if [[ -n "$DRY_RUN" ]]; then
    echo "[DRY-RUN] $PROJECT_ROOT/scripts/verify/check-contracts-on-chain-138.sh"
  else
    [[ -f "$SMOM/.env" ]] && set -a && source "$SMOM/.env" && set +a
    if [[ -f "$SMOM/.env" ]]; then
      set -a
      # shellcheck source=/dev/null
      source "$SMOM/.env"
      set +a
    fi
    "$PROJECT_ROOT/scripts/verify/check-contracts-on-chain-138.sh" "${RPC_URL_138:-}" || true
  fi
  run_summary_record_step "5" "On-chain verification" "$([[ -n "$DRY_RUN" ]] && echo planned || echo success)" "$((SECONDS - STEP_STARTED))"
  echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
  echo ""
else
  run_summary_record_step "5" "On-chain verification" "skipped" "0"
  echo "--- Step 5: Verify (skipped) ---"
  echo ""
fi

echo "=== Next steps run complete. ==="
echo "Total elapsed: $(format_duration "$SECONDS")"
RUN_STATUS="success"
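A typical invocation pattern for this wrapper, as a sketch only (flags exactly as parsed above; jq is assumed to be available for reading the summary):

  ./scripts/deployment/run-all-next-steps-chain138.sh --dry-run
  ./scripts/deployment/run-all-next-steps-chain138.sh --mesh-only \
    --json-out reports/status/run-all-next-steps-chain138-latest.json
  jq -r '.overall_status, (.steps[] | "\(.step)\t\(.status)\t\(.duration_seconds)s\t\(.name)")' \
    reports/status/run-all-next-steps-chain138-latest.json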
99
scripts/lib/run-summary.sh
Normal file
99
scripts/lib/run-summary.sh
Normal file
@@ -0,0 +1,99 @@
#!/usr/bin/env bash
# Shared machine-readable run summary helper for wrapper scripts.
# Usage:
#   source scripts/lib/run-summary.sh
#   run_summary_init "<script-name>" "$RUN_STARTED_AT" "${JSON_OUT:-}"
#   run_summary_record_step "1" "Config validation" "success" "12"
#   run_summary_write "$RUN_STATUS" "$SECONDS" "$RUN_MODE"

set -euo pipefail

RUN_SUMMARY_TMP=""
RUN_SUMMARY_SCRIPT_NAME=""
RUN_SUMMARY_STARTED_AT=""
RUN_SUMMARY_JSON_OUT=""

run_summary_init() {
  RUN_SUMMARY_SCRIPT_NAME="$1"
  RUN_SUMMARY_STARTED_AT="$2"
  RUN_SUMMARY_JSON_OUT="${3:-}"

  if [[ -n "$RUN_SUMMARY_JSON_OUT" ]]; then
    RUN_SUMMARY_TMP="$(mktemp)"
  fi
}

run_summary_record_step() {
  local step_number="$1"
  local step_name="$2"
  local step_status="$3"
  local duration_seconds="$4"

  if [[ -z "$RUN_SUMMARY_JSON_OUT" ]]; then
    return 0
  fi

  printf '%s\t%s\t%s\t%s\n' \
    "$step_number" \
    "$step_name" \
    "$step_status" \
    "$duration_seconds" >> "$RUN_SUMMARY_TMP"
}

run_summary_write() {
  local overall_status="$1"
  local total_elapsed_seconds="$2"
  local run_mode="${3:-run}"

  if [[ -z "$RUN_SUMMARY_JSON_OUT" ]]; then
    return 0
  fi

  mkdir -p "$(dirname "$RUN_SUMMARY_JSON_OUT")"

  python3 - "$RUN_SUMMARY_TMP" "$RUN_SUMMARY_JSON_OUT" "$RUN_SUMMARY_SCRIPT_NAME" "$RUN_SUMMARY_STARTED_AT" "$overall_status" "$total_elapsed_seconds" "$run_mode" <<'PY'
import json
import sys
from pathlib import Path

tmp_path = Path(sys.argv[1])
json_out = Path(sys.argv[2])
script_name = sys.argv[3]
started_at = sys.argv[4]
overall_status = sys.argv[5]
total_elapsed_seconds = int(sys.argv[6])
run_mode = sys.argv[7]

steps = []
if tmp_path.exists():
    for raw_line in tmp_path.read_text(encoding="utf-8").splitlines():
        if not raw_line.strip():
            continue
        number, name, status, duration_seconds = raw_line.split("\t", 3)
        steps.append(
            {
                "step": number,
                "name": name,
                "status": status,
                "duration_seconds": int(duration_seconds),
            }
        )

payload = {
    "script": script_name,
    "started_at_utc": started_at,
    "overall_status": overall_status,
    "run_mode": run_mode,
    "total_elapsed_seconds": total_elapsed_seconds,
    "steps": steps,
}

json_out.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8")
PY
}

run_summary_cleanup() {
  if [[ -n "$RUN_SUMMARY_TMP" && -f "$RUN_SUMMARY_TMP" ]]; then
    rm -f "$RUN_SUMMARY_TMP"
  fi
}
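With --json-out set, run_summary_write produces one JSON object per run; the field names come straight from the Python payload above, and the values below are illustrative only:

  {
    "script": "run-all-next-steps-chain138.sh",
    "started_at_utc": "2025-01-01T08:00:00Z",
    "overall_status": "success",
    "run_mode": "run",
    "total_elapsed_seconds": 312,
    "steps": [
      { "step": "1", "name": "Preflight", "status": "success", "duration_seconds": 14 },
      { "step": "2", "name": "TransactionMirror + seed pool", "status": "skipped", "duration_seconds": 0 }
    ]
  }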
76
scripts/operator/sync-pmg-webui-password-to-dotenv.sh
Executable file
76
scripts/operator/sync-pmg-webui-password-to-dotenv.sh
Executable file
@@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Fetch Proxmox Mail Gateway (LXC 100) web UI password from the container and upsert
# it into the repo .env as PMG_WEBUI_PASSWORD="..."
#
# Usage (from repo root):
#   bash scripts/operator/sync-pmg-webui-password-to-dotenv.sh
#   PROXMOX_SSH=root@192.168.11.11 PMG_VMID=100 bash ...
#
# Does not print the password. Backs up .env to .env.bak.pmg.<timestamp> before edit.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
PROXMOX_SSH="${PROXMOX_SSH:-root@192.168.11.11}"
PMG_VMID="${PMG_VMID:-100}"
KEY="${PMG_ENV_KEY:-PMG_WEBUI_PASSWORD}"
DOTENV="${DOTENV_FILE:-$PROJECT_ROOT/.env}"
PW_PATH="${PMG_PASSWORD_FILE:-/root/PMG_WEBUI_password.txt}"

if [ ! -e "$DOTENV" ]; then
  : > "$DOTENV"
  echo "Created $DOTENV"
fi

if ! command -v ssh >/dev/null; then
  echo "ssh not found" >&2
  exit 1
fi
if ! command -v python3 >/dev/null; then
  echo "python3 not found" >&2
  exit 1
fi

PW=$(
  ssh -o ConnectTimeout=15 -o BatchMode=yes "$PROXMOX_SSH" "pct exec $PMG_VMID -- cat $PW_PATH" 2>/dev/null | tr -d '\r' || true
)
# Trim leading/trailing whitespace only
PW="${PW#"${PW%%[![:space:]]*}"}"
PW="${PW%"${PW##*[![:space:]]}"}"

if [ -z "$PW" ]; then
  echo "Failed to read password (empty or ssh failed). Check: ssh $PROXMOX_SSH 'pct exec $PMG_VMID -- test -f $PW_PATH'" >&2
  exit 1
fi

TS=$(date +%Y%m%d_%H%M%S)
if [ -f "$DOTENV" ] && [ -s "$DOTENV" ]; then
  cp -a "$DOTENV" "$DOTENV.bak.pmg.$TS"
  echo "Backup: $DOTENV.bak.pmg.$TS"
fi

export DOTENV_PATH="$DOTENV" DOTENV_KEY="$KEY"
# shellcheck disable=SC2016,SC2090
python3 -c '
import os, re
import sys
path = os.environ["DOTENV_PATH"]
key = os.environ["DOTENV_KEY"]
# Last CLI argument is the password (robust whether or not "--" is kept in sys.argv by -c).
pw = sys.argv[-1]

def dquote(s: str) -> str:
    return "\"" + s.replace("\\", "\\\\").replace("\"", "\\\"") + "\""

line = key + "=" + dquote(pw) + "\n"
with open(path) as f:
    lines = f.readlines()

out_lines = [ln for ln in lines if not re.match(r"^" + re.escape(key) + r"\s*=", ln)]
out_lines.append(line)
with open(path, "w") as f:
    f.writelines(out_lines)
print("Wrote " + key + " to " + path + " (value not shown).")
' -- "$PW"
unset DOTENV_PATH DOTENV_KEY

echo "Done."
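The upsert is idempotent: re-running replaces any existing PMG_WEBUI_PASSWORD= line instead of appending a duplicate. A sketch of a run from the repo root (host and VMID shown are the script defaults):

  PROXMOX_SSH=root@192.168.11.11 PMG_VMID=100 \
    bash scripts/operator/sync-pmg-webui-password-to-dotenv.sh
  grep -c '^PMG_WEBUI_PASSWORD=' .env   # expect exactly 1; the value itself is never printed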
55
scripts/proxmox/print-nathan-remote-operator-onboarding.sh
Executable file
55
scripts/proxmox/print-nathan-remote-operator-onboarding.sh
Executable file
@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# Print what to hand to Nathan: Proxmox API env + checklist for Gitea/CF/VPN.
# The secrets file is created on the operator LAN host by:
#   ./scripts/proxmox/setup-nathan-remote-operator-proxmox.sh
#
# Usage: ./scripts/proxmox/print-nathan-remote-operator-onboarding.sh
#
# **Run only on a trusted machine; output contains the live API token.**

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
ENV_FILE="${NATHAN_ENV_FILE:-${PROJECT_ROOT}/reports/secrets/nathan-remote-operator.env}"

cat <<'PREAMBLE'
================================================================================
NATHAN — Remote operator (Devin.ai + Cursor)
================================================================================

Copy "PROXMOX_*" below for:
- Devin.ai: paste into their secret/connection fields for Proxmox.

- Cursor: Nathan pastes the same block into this repo’s root .env
  (or parent .env for mcp-proxmox) on his laptop, chmod 600 .env

- MCP: docs/04-configuration/MCP_SETUP.md
- Optional: PROXMOX_ALLOW_ELEVATED=false in MCP (default) unless elevated tools are required

NETWORK (separate from Proxmox token — IT / you)
- Nathan needs a path to reach PROXMOX_HOST:8006 and LAN services (Gitea, RPC,
  NPM) — VPN, Cloudflare Access/WARP, or SSH tunnel. See:
  docs/00-meta/OPERATOR_CREDENTIALS_CHECKLIST.md
  docs/04-configuration/cloudflare/CLOUDFLARE_ZERO_TRUST_GUIDE.md

GITEA
- Add Nathan; grant repos; per-repo Gitea Action secrets: see
  docs/04-configuration/DEVIN_GITEA_PROXMOX_CICD.md
  (PHOENIX_DEPLOY_URL, PHOENIX_DEPLOY_TOKEN, etc.)

PROXMOX API BLOCK (sensitive)
--------------------------------------------------------------------------------
PREAMBLE

if [[ -f "$ENV_FILE" ]]; then
  grep -E '^#|^PROXMOX_' "$ENV_FILE" || true
  echo "--------------------------------------------------------------------------------"
  echo "Full file path: $ENV_FILE (not for git; reports/secrets/ is gitignored)"
else
  echo ""
  echo "(No secrets file yet.)"
  echo " On a LAN/operator host: ./scripts/proxmox/setup-nathan-remote-operator-proxmox.sh"
  echo " Then re-run: $0"
fi
echo "================================================================================"
82
scripts/proxmox/setup-nathan-remote-operator-proxmox.sh
Executable file
82
scripts/proxmox/setup-nathan-remote-operator-proxmox.sh
Executable file
@@ -0,0 +1,82 @@
#!/usr/bin/env bash
# On a LAN host with SSH to Proxmox: create Nathan's PVE API token, set Datacenter ACL,
# and write reports/secrets/nathan-remote-operator.env (gitignored) for handoff to Devin + Cursor.
#
# Usage:
#   ./scripts/proxmox/setup-nathan-remote-operator-proxmox.sh
#   NATHAN_PVE_TOKEN_NAME=nathan-devin PROXMOX_SSH_HOST=192.168.11.10 ./scripts/proxmox/setup-nathan-remote-operator-proxmox.sh
#   ./scripts/proxmox/setup-nathan-remote-operator-proxmox.sh --recreate   # remove same-named token first
#
# Env:
#   PROXMOX_SSH_HOST        — SSH target (PVE node); default 192.168.11.11
#   PVE_TOKEN_USER          — PVE user for the token; default root@pam
#   NATHAN_PVE_TOKEN_NAME   — API token name (not full id); default nathan-devin
#   NATHAN_PROXMOX_API_HOST — hostname Nathan uses from off-LAN; default proxmox-api.d-bis.org
#
# See: docs/04-configuration/PROXMOX_API_SECRETS_DOTENV.md

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck disable=SC1090
[[ -f "$PROJECT_ROOT/.env" ]] && set -a && source "$PROJECT_ROOT/.env" 2>/dev/null && set +a || true

RECREATE=false
for a in "$@"; do
  [[ "$a" == "--recreate" ]] && RECREATE=true
done

SSH_HOST="${PROXMOX_SSH_HOST:-${PROXMOX_R630_01:-${PROXMOX_HOST:-192.168.11.11}}}"
PVE_USER="${PVE_TOKEN_USER:-root@pam}"
TOKEN_BARE_NAME="${NATHAN_PVE_TOKEN_NAME:-nathan-devin}"
API_HOST="${NATHAN_PROXMOX_API_HOST:-proxmox-api.d-bis.org}"
SECRETS_DIR="${PROJECT_ROOT}/reports/secrets"
OUT_ENV="${SECRETS_DIR}/nathan-remote-operator.env"
TOKEN_ID="${PVE_USER}!${TOKEN_BARE_NAME}"

echo "Nathan Proxmox API — setup on cluster via SSH"
echo " SSH: root@${SSH_HOST}  user: ${PVE_USER}  token name: ${TOKEN_BARE_NAME}  public API host: ${API_HOST}"
echo ""

if [[ "$RECREATE" == true ]]; then
  echo "Removing existing token (if any): ${TOKEN_ID} ..."
  ssh -o ConnectTimeout=12 -o BatchMode=yes "root@${SSH_HOST}" \
    "pveum user token remove ${PVE_USER} ${TOKEN_BARE_NAME} 2>/dev/null" || true
fi

add_cmd="pveum user token add ${PVE_USER} ${TOKEN_BARE_NAME} --privsep=0 2>&1"
OUTPUT=$(ssh -o ConnectTimeout=12 -o BatchMode=yes "root@${SSH_HOST}" "$add_cmd" || true)

TOKEN_VALUE=$(echo "$OUTPUT" | sed -n 's/.*│ value *│ *\([a-f0-9-]*\) *│.*/\1/p' | tr -d ' ')
if [[ -z "$TOKEN_VALUE" ]]; then
  TOKEN_VALUE=$(echo "$OUTPUT" | grep -oE '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' | head -1)
fi
if [[ -z "$TOKEN_VALUE" ]]; then
  echo "ERROR: Could not create or parse API token. If the token already exists, run with --recreate" >&2
  echo "pveum output:" >&2
  echo "$OUTPUT" >&2
  exit 1
fi

echo "Setting Datacenter / ACL to Administrator for ${TOKEN_ID} ..."
ssh -o ConnectTimeout=12 -o BatchMode=yes "root@${SSH_HOST}" \
  "pveum acl modify / -token '${TOKEN_ID}' -role Administrator"

mkdir -p "$SECRETS_DIR"
umask 077
{
  echo "# Generated by scripts/proxmox/setup-nathan-remote-operator-proxmox.sh — do not commit (reports/secrets/ is gitignored)"
  echo "# Proxmox API (Devin + Cursor mcp-proxmox / validate scripts) — use public hostname when off-LAN (requires VPN/Access path if restricted)"
  echo "PROXMOX_HOST=${API_HOST}"
  echo "PROXMOX_PORT=8006"
  echo "PROXMOX_USER=${PVE_USER}"
  echo "PROXMOX_TOKEN_NAME=${TOKEN_BARE_NAME}"
  echo "PROXMOX_TOKEN_VALUE=${TOKEN_VALUE}"
} > "$OUT_ENV"
chmod 600 "$OUT_ENV"

echo ""
echo "OK. Wrote ${OUT_ENV} (mode 600)."
echo "Run: ./scripts/proxmox/print-nathan-remote-operator-onboarding.sh"
echo "Do not commit ${OUT_ENV}."
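Once the env file exists, the token can be smoke-tested against the Proxmox API using the standard PVEAPIToken authorization header (a sketch; it assumes the network path to port 8006 is already in place, and -k is only there because the node may serve a self-signed certificate):

  set -a; source reports/secrets/nathan-remote-operator.env; set +a
  curl -fsSk "https://${PROXMOX_HOST}:${PROXMOX_PORT}/api2/json/version" \
    -H "Authorization: PVEAPIToken=${PROXMOX_USER}!${PROXMOX_TOKEN_NAME}=${PROXMOX_TOKEN_VALUE}"

A JSON body with a version field confirms the token and the Administrator ACL are usable.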
@@ -113,7 +113,8 @@ if [ "$NEED_CERT_COUNT" = "0" ]; then
  exit 0
fi

# Optional: only process domains matching this grep pattern (e.g. "rpc-fireblocks|ws.rpc-fireblocks")
# Optional: only process lines matching this grep -E pattern (lines look like "87|mail.d-bis.org").
# Example: CERT_DOMAINS_FILTER='mail\.d-bis\.org' (not ^mail — the line starts with a numeric id)
if [ -n "${CERT_DOMAINS_FILTER:-}" ]; then
  NEED_CERT_LIST=$(echo "$NEED_CERT_LIST" | grep -E "$CERT_DOMAINS_FILTER" || true)
  NEED_CERT_COUNT=$(echo "$NEED_CERT_LIST" | grep -c . 2>/dev/null || echo "0")
@@ -17,9 +17,15 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"

# shellcheck source=./scripts/lib/run-summary.sh
source "$SCRIPT_DIR/lib/run-summary.sh"

RUN_STARTED_AT="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
SECONDS=0

# Always load dotenv so Operator/LAN has NPM_PASSWORD, PRIVATE_KEY, RPC, etc.
if [[ -f "$SCRIPT_DIR/lib/load-project-env.sh" ]]; then
  # shellcheck source=scripts/lib/load-project-env.sh
  # shellcheck source=./scripts/lib/load-project-env.sh
  source "$SCRIPT_DIR/lib/load-project-env.sh"
fi

@@ -28,25 +34,55 @@ SKIP_BACKUP=false
SKIP_VERIFY=false
DO_DEPLOY=false
DO_CREATE_VMS=false
JSON_OUT=""

format_duration() {
  local total="$1"
  printf '%02dm:%02ds' "$((total / 60))" "$((total % 60))"
}

for a in "$@"; do
  [[ "$a" == "--dry-run" ]] && DRY_RUN=true
  [[ "$a" == "--skip-backup" ]] && SKIP_BACKUP=true
  [[ "$a" == "--skip-verify" ]] && SKIP_VERIFY=true
  [[ "$a" == "--deploy" ]] && DO_DEPLOY=true
  [[ "$a" == "--create-vms" ]] && DO_CREATE_VMS=true
  [[ "$a" == "-h" || "$a" == "--help" ]] && {
    echo "Usage: $0 [--dry-run] [--skip-backup] [--skip-verify] [--deploy] [--create-vms]"
    echo " --dry-run Print steps only, do not run."
    echo " --skip-backup Skip NPMplus backup."
    echo " --skip-verify Skip Blockscout contract verification."
    echo " --deploy Also run contract deployment (smom-dbis-138 phased + TransactionMirror if needed)."
    echo " --create-vms Also create Proxmox containers (DBIS Core 6 containers; requires SSH to PROXMOX_HOST)."
    echo "See: docs/00-meta/STEPS_FROM_PROXMOX_OR_LAN_WITH_SECRETS.md"
    exit 0
  }
  case "$a" in
    --dry-run) DRY_RUN=true ;;
    --skip-backup) SKIP_BACKUP=true ;;
    --skip-verify) SKIP_VERIFY=true ;;
    --deploy) DO_DEPLOY=true ;;
    --create-vms) DO_CREATE_VMS=true ;;
    --json-out)
      JSON_OUT_NEXT=true
      ;;
    -h|--help)
      echo "Usage: $0 [--dry-run] [--skip-backup] [--skip-verify] [--deploy] [--create-vms] [--json-out PATH]"
      echo " --dry-run Print steps only, do not run."
      echo " --skip-backup Skip NPMplus backup."
      echo " --skip-verify Skip Blockscout contract verification."
      echo " --deploy Also run contract deployment (smom-dbis-138 phased + TransactionMirror if needed)."
      echo " --create-vms Also create Proxmox containers (DBIS Core 6 containers; requires SSH to PROXMOX_HOST)."
      echo " --json-out Write a machine-readable run summary JSON."
      echo "See: docs/00-meta/STEPS_FROM_PROXMOX_OR_LAN_WITH_SECRETS.md"
      exit 0
      ;;
    *)
      if [[ "${JSON_OUT_NEXT:-false}" == true ]]; then
        JSON_OUT="$a"
        JSON_OUT_NEXT=false
      else
        echo "Unknown argument: $a" >&2
        exit 1
      fi
      ;;
  esac
done

if [[ "${JSON_OUT_NEXT:-false}" == true ]]; then
  echo "Missing value for --json-out" >&2
  exit 1
fi

RUN_STATUS="failed"
run_summary_init "run-all-operator-tasks-from-lan.sh" "$RUN_STARTED_AT" "$JSON_OUT"
trap 'run_summary_write "$RUN_STATUS" "$SECONDS" "$([[ "$DRY_RUN" == true ]] && echo dry-run || echo run)"; run_summary_cleanup' EXIT

log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
@@ -54,58 +90,99 @@ log_err() { echo -e "\033[0;31m[✗]\033[0m $1"; }

echo ""
echo "=== Run all operator tasks (from LAN) ==="
echo " started=$RUN_STARTED_AT"
echo " dry-run=$DRY_RUN skip-backup=$SKIP_BACKUP skip-verify=$SKIP_VERIFY deploy=$DO_DEPLOY create-vms=$DO_CREATE_VMS"
echo ""

# 1) Wave 0: NPMplus RPC fix + backup
if [[ "$DRY_RUN" == true ]]; then
  run_summary_record_step "1" "Wave 0: NPMplus RPC fix + backup" "planned" "0"
  echo "[DRY-RUN] Would run: run-wave0-from-lan.sh (NPMplus RPC fix + backup)"
else
  bash "$SCRIPT_DIR/run-wave0-from-lan.sh" $([[ "$SKIP_BACKUP" == true ]] && echo --skip-backup)
  STEP_STARTED=$SECONDS
  WAVE0_ARGS=()
  if [[ "$SKIP_BACKUP" == true ]]; then
    WAVE0_ARGS+=(--skip-backup)
  fi
  bash "$SCRIPT_DIR/run-wave0-from-lan.sh" "${WAVE0_ARGS[@]}"
  run_summary_record_step "1" "Wave 0: NPMplus RPC fix + backup" "success" "$((SECONDS - STEP_STARTED))"
  echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
fi
echo ""

# 2) Blockscout verification
if [[ "$SKIP_VERIFY" != true ]]; then
  if [[ "$DRY_RUN" == true ]]; then
    run_summary_record_step "2" "Blockscout verification" "planned" "0"
    echo "[DRY-RUN] Would run: ./scripts/verify/run-contract-verification-with-proxy.sh (dotenv already loaded)"
  else
    STEP_STARTED=$SECONDS
    log_info "Blockscout source verification..."
    (bash "$SCRIPT_DIR/verify/run-contract-verification-with-proxy.sh") || log_warn "Blockscout verify skipped (env or script failed)"
    run_summary_record_step "2" "Blockscout verification" "success" "$((SECONDS - STEP_STARTED))"
    echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
  fi
  echo ""
else
  run_summary_record_step "2" "Blockscout verification" "skipped" "0"
fi

# 3) Optional: contract deployment (PRIVATE_KEY from dotenv already loaded above)
if [[ "$DO_DEPLOY" == true ]]; then
  if [[ "$DRY_RUN" == true ]]; then
    run_summary_record_step "3" "Contract deployment (phased + TransactionMirror)" "planned" "0"
    echo "[DRY-RUN] Would run: smom-dbis-138 deploy-all-phases.sh (and deploy-transaction-mirror-chain138.sh if needed)"
  else
    if [[ -n "${PRIVATE_KEY:-}" ]]; then
      STEP_STARTED=$SECONDS
      log_info "Contract deployment (phased)..."
      (cd smom-dbis-138 && ./scripts/deployment/deploy-all-phases.sh) && log_ok "Phased deploy done" || log_warn "Phased deploy failed (may already be deployed)"
      if (cd smom-dbis-138 && ./scripts/deployment/deploy-all-phases.sh); then
        log_ok "Phased deploy done"
      else
        log_warn "Phased deploy failed (may already be deployed)"
      fi
      log_info "TransactionMirror (if needed)..."
      bash "$SCRIPT_DIR/deployment/deploy-transaction-mirror-chain138.sh" 2>/dev/null && log_ok "TransactionMirror deployed" || log_warn "TransactionMirror skipped or failed (add TRANSACTION_MIRROR_ADDRESS to .env if deployed)"
      if bash "$SCRIPT_DIR/deployment/deploy-transaction-mirror-chain138.sh" 2>/dev/null; then
        log_ok "TransactionMirror deployed"
      else
        log_warn "TransactionMirror skipped or failed (add TRANSACTION_MIRROR_ADDRESS to .env if deployed)"
      fi
      run_summary_record_step "3" "Contract deployment (phased + TransactionMirror)" "success" "$((SECONDS - STEP_STARTED))"
      echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
    else
      run_summary_record_step "3" "Contract deployment (phased + TransactionMirror)" "skipped" "0"
      log_warn "PRIVATE_KEY not set; set in smom-dbis-138/.env or .env and re-run"
    fi
  fi
  echo ""
else
  run_summary_record_step "3" "Contract deployment (phased + TransactionMirror)" "skipped" "0"
fi

# 4) Optional: create Proxmox containers (DBIS Core)
if [[ "$DO_CREATE_VMS" == true ]]; then
  if [[ "$DRY_RUN" == true ]]; then
    run_summary_record_step "4" "Create DBIS Core containers" "planned" "0"
    echo "[DRY-RUN] Would run: dbis_core/scripts/deployment/create-dbis-core-containers.sh"
  else
    if [[ -f dbis_core/scripts/deployment/create-dbis-core-containers.sh ]]; then
      STEP_STARTED=$SECONDS
      log_info "Creating DBIS Core containers (if missing)..."
      NON_INTERACTIVE=1 bash dbis_core/scripts/deployment/create-dbis-core-containers.sh 2>/dev/null && log_ok "DBIS Core containers done" || log_warn "DBIS Core create failed or skipped (check PROXMOX_HOST SSH)"
      if NON_INTERACTIVE=1 bash dbis_core/scripts/deployment/create-dbis-core-containers.sh 2>/dev/null; then
        log_ok "DBIS Core containers done"
      else
        log_warn "DBIS Core create failed or skipped (check PROXMOX_HOST SSH)"
      fi
      run_summary_record_step "4" "Create DBIS Core containers" "success" "$((SECONDS - STEP_STARTED))"
      echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
    else
      run_summary_record_step "4" "Create DBIS Core containers" "skipped" "0"
      log_warn "create-dbis-core-containers.sh not found; skipping"
    fi
  fi
  echo ""
else
  run_summary_record_step "4" "Create DBIS Core containers" "skipped" "0"
fi

echo "=== Next steps (manual if needed) ==="
@@ -116,3 +193,5 @@ echo " Backup cron: bash scripts/maintenance/schedule-npmplus-backup-cron.
echo " Daily/weekly: bash scripts/maintenance/schedule-daily-weekly-cron.sh --install"
echo " Full steps list: docs/00-meta/STEPS_FROM_PROXMOX_OR_LAN_WITH_SECRETS.md"
echo ""
echo "Total elapsed: $(format_duration "$SECONDS")"
RUN_STATUS="success"
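When this wrapper runs unattended, the JSON summary gives a quick pass/fail check without scraping the log (a sketch; jq assumed available):

  ./scripts/run-all-operator-tasks-from-lan.sh --skip-backup \
    --json-out reports/status/run-all-operator-tasks-latest.json
  jq -e '.overall_status == "success"' reports/status/run-all-operator-tasks-latest.json >/dev/null \
    || echo "operator run did not finish cleanly" >&2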
88
scripts/run-completable-tasks-from-anywhere.sh
Executable file → Normal file
88
scripts/run-completable-tasks-from-anywhere.sh
Executable file → Normal file
@@ -2,7 +2,7 @@
# Run all tasks that do NOT require LAN, Proxmox SSH, PRIVATE_KEY, or NPM_PASSWORD.
# Use from dev machine / WSL / CI. For tasks that need LAN/creds, see run-operator-tasks-from-lan.sh.
# Usage: ./scripts/run-completable-tasks-from-anywhere.sh [--dry-run]
#   --dry-run   Print the four steps only; do not run them (exit 0).
#   --dry-run   Print the five steps only; do not run them (exit 0).
#
# Exit codes (Unix convention): 0 = success (all steps passed), non-zero = failure.
# Do not "fix" exit 0 — it means the script completed successfully.
@@ -13,43 +13,113 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"

# shellcheck source=./scripts/lib/run-summary.sh
source "$SCRIPT_DIR/lib/run-summary.sh"

RUN_STARTED_AT="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
SECONDS=0

DRY_RUN=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true && break; done
JSON_OUT=""
while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run)
      DRY_RUN=true
      shift
      ;;
    --json-out)
      [[ $# -ge 2 ]] || { echo "Missing value for --json-out" >&2; exit 1; }
      JSON_OUT="$2"
      shift 2
      ;;
    -h|--help)
      sed -n '1,8p' "$0"
      echo " --json-out PATH Write a machine-readable run summary JSON."
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      exit 1
      ;;
  esac
done

RUN_STATUS="failed"
RUN_MODE="run"
run_summary_init "run-completable-tasks-from-anywhere.sh" "$RUN_STARTED_AT" "$JSON_OUT"
trap 'run_summary_write "$RUN_STATUS" "$SECONDS" "$RUN_MODE"; run_summary_cleanup' EXIT

format_duration() {
  local total="$1"
  printf '%02dm:%02ds' "$((total / 60))" "$((total % 60))"
}

if $DRY_RUN; then
  RUN_MODE="dry-run"
  run_summary_record_step "1" "Config validation" "planned" "0"
  run_summary_record_step "2" "On-chain contract check (Chain 138)" "planned" "0"
  run_summary_record_step "3" "Run all validation (--skip-genesis)" "planned" "0"
  run_summary_record_step "4" "Non-EVM public health + lane status" "planned" "0"
  run_summary_record_step "5" "Canonical .env reconciliation output" "planned" "0"
  RUN_STATUS="success"
  echo "=== Completable from anywhere (--dry-run: commands only) ==="
  echo ""
  echo "1. Config validation: bash scripts/validation/validate-config-files.sh [--dry-run]"
  echo "2. On-chain check (138): SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh || true"
  echo "3. All validation: bash scripts/verify/run-all-validation.sh --skip-genesis (includes cW* mesh matrix when pair-discovery JSON exists)"
  echo "4. Reconcile .env: bash scripts/verify/reconcile-env-canonical.sh --print"
  echo "4. Non-EVM status: bash scripts/verify/check-non-evm-network-health.sh --json-out reports/status/non-evm-network-health-latest.json && python3 scripts/verify/build-non-evm-lane-status.py"
  echo "5. Reconcile .env: bash scripts/verify/reconcile-env-canonical.sh --print"
  echo ""
  echo "Run without --dry-run to execute. Exit 0 = success."
  exit 0
fi

echo "=== Completable from anywhere (no LAN/creds) ==="
echo "Started (UTC): $RUN_STARTED_AT"
echo ""

# 1. Config validation
echo "[Step 1/4] Config validation..."
echo "[Step 1/5] Config validation..."
STEP_STARTED=$SECONDS
bash scripts/validation/validate-config-files.sh
run_summary_record_step "1" "Config validation" "success" "$((SECONDS - STEP_STARTED))"
echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
echo ""

# 2. On-chain contract check (Chain 138) — may warn if RPC unreachable
echo "[Step 2/4] On-chain contract check (Chain 138)..."
echo "[Step 2/5] On-chain contract check (Chain 138)..."
STEP_STARTED=$SECONDS
SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh || true
run_summary_record_step "2" "On-chain contract check (Chain 138)" "success" "$((SECONDS - STEP_STARTED))"
echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
echo ""

# 3. Full validation (skip genesis to avoid RPC; includes cW* mesh matrix when pair-discovery JSON exists)
echo "[Step 3/4] Run all validation (--skip-genesis)..."
echo "[Step 3/5] Run all validation (--skip-genesis)..."
STEP_STARTED=$SECONDS
bash scripts/verify/run-all-validation.sh --skip-genesis
run_summary_record_step "3" "Run all validation (--skip-genesis)" "success" "$((SECONDS - STEP_STARTED))"
echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
echo ""

# 4. Emit canonical .env lines for reconciliation
echo "[Step 4/4] Canonical .env (reconcile smom-dbis-138/.env)..."
# 4. Non-EVM public health + repo-backed lane status
echo "[Step 4/5] Non-EVM public health + lane status..."
STEP_STARTED=$SECONDS
bash scripts/verify/check-non-evm-network-health.sh --json-out reports/status/non-evm-network-health-latest.json
python3 scripts/verify/build-non-evm-lane-status.py
run_summary_record_step "4" "Non-EVM public health + lane status" "success" "$((SECONDS - STEP_STARTED))"
echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
echo ""

# 5. Emit canonical .env lines for reconciliation
echo "[Step 5/5] Canonical .env (reconcile smom-dbis-138/.env)..."
STEP_STARTED=$SECONDS
bash scripts/verify/reconcile-env-canonical.sh --print
run_summary_record_step "5" "Canonical .env reconciliation output" "success" "$((SECONDS - STEP_STARTED))"
echo " Completed in $(format_duration "$((SECONDS - STEP_STARTED))")"
echo ""

echo "=== Done. Tasks requiring LAN or credentials: run scripts/run-operator-tasks-from-lan.sh from a host on LAN with NPM_PASSWORD/PRIVATE_KEY set. ==="
echo "=== Done. Tasks requiring LAN or credentials: run scripts/run-all-operator-tasks-from-lan.sh from a host on LAN with NPM_PASSWORD/PRIVATE_KEY set. ==="
echo "Total elapsed: $(format_duration "$SECONDS")"
RUN_STATUS="success"
exit 0
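Because the script keeps the Unix exit-code convention stated in its header, a CI job can gate on it directly; a minimal sketch:

  bash scripts/run-completable-tasks-from-anywhere.sh \
    --json-out reports/status/run-completable-tasks-latest.json
  # a non-zero exit fails the job; the JSON mirrors the five steps printed to the terminal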
@@ -9,24 +9,68 @@ set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$REPO_ROOT"

# shellcheck source=./scripts/lib/run-summary.sh
source "$REPO_ROOT/scripts/lib/run-summary.sh"

RUN_STARTED_AT="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
SECONDS=0

DRY_RUN=""
WAVE_FILTER=""
VALID_WAVES="E0 E1 E2 E3 E4 E5 E6 E7"
JSON_OUT=""

format_duration() {
  local total="$1"
  printf '%02dm:%02ds' "$((total / 60))" "$((total % 60))"
}

wave_valid() {
  [[ " $VALID_WAVES " == *" $1 "* ]]
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run) DRY_RUN=1; shift ;;
    --wave) WAVE_FILTER="$2"; shift 2 ;;
    *) shift ;;
    --wave)
      [[ $# -ge 2 ]] || { echo "Missing value for --wave. Valid: $VALID_WAVES" >&2; exit 1; }
      WAVE_FILTER="$2"
      wave_valid "$WAVE_FILTER" || { echo "Invalid wave: $WAVE_FILTER. Valid: $VALID_WAVES" >&2; exit 1; }
      shift 2
      ;;
    --json-out)
      [[ $# -ge 2 ]] || { echo "Missing value for --json-out" >&2; exit 1; }
      JSON_OUT="$2"
      shift 2
      ;;
    -h|--help)
      sed -n '1,8p' "$0"
      echo " --json-out PATH Write a machine-readable wave summary JSON."
      echo "Valid waves: $VALID_WAVES"
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      exit 1
      ;;
  esac
done

SMOM="${REPO_ROOT}/smom-dbis-138"
LOG_DIR="/tmp/e2e-full-parallel-$(date +%Y%m%d-%H%M%S)"
mkdir -p "$LOG_DIR"
RUN_STATUS="failed"
RUN_MODE="run"
run_summary_init "run-e2e-flow-tasks-full-parallel.sh" "$RUN_STARTED_AT" "$JSON_OUT"
trap 'run_summary_write "$RUN_STATUS" "$SECONDS" "$RUN_MODE"; run_summary_cleanup' EXIT

log() { echo "[$(date +%H:%M:%S)] $*"; }
run_wave() {
  local w="$1"
  if [[ -n "$WAVE_FILTER" && "$w" != "$WAVE_FILTER" ]]; then return 1; fi
  if [[ -n "$WAVE_FILTER" && "$w" != "$WAVE_FILTER" ]]; then
    run_summary_record_step "$w" "Wave $w" "filtered" "0"
    return 1
  fi
  log "=== Wave $w ==="
  return 0
}
@@ -34,41 +78,61 @@ run_wave() {
# ----- E0: Gates (operator only — print checklist) -----
run_e0() {
  run_wave "E0" || return 0
  local step_started=$SECONDS
  log "E0 (Operator): Ensure X1 RPC 2101 writable, X6 TransactionMirror deployed, X7 deployer funded on 138 and destinations, A1 Core RPC reachable."
  log " See: docs/03-deployment/RPC_2101_READONLY_FIX.md, health-check-rpc-2101.sh, ADDRESS_MATRIX_AND_STATUS.md"
  if [[ -n "$DRY_RUN" ]]; then return 0; fi
  if [[ -n "$DRY_RUN" ]]; then
    run_summary_record_step "E0" "Wave E0" "planned" "0"
    return 0
  fi
  # Optional: run health check if script exists
  if [[ -f "$REPO_ROOT/scripts/health/check-rpc-vms-health.sh" ]]; then
    ( "$REPO_ROOT/scripts/health/check-rpc-vms-health.sh" 2>&1 | tee "$LOG_DIR/e0-health.log" ) || true
  fi
  run_summary_record_step "E0" "Wave E0" "success" "$((SECONDS - step_started))"
}

# ----- E1: Flow A — PMM pools Chain 138 (parallel inside Phase 1) -----
run_e1() {
  run_wave "E1" || return 0
  if [[ ! -d "$SMOM" ]]; then log "Skip E1: smom-dbis-138 not found"; return 0; fi
  local step_started=$SECONDS
  if [[ ! -d "$SMOM" ]]; then
    log "Skip E1: smom-dbis-138 not found"
    run_summary_record_step "E1" "Wave E1" "skipped" "0"
    return 0
  fi
  if [[ -n "$DRY_RUN" ]]; then
    log "[DRY RUN] Would run: cd smom-dbis-138 && ./scripts/deployment/run-pmm-full-parity-all-phases.sh (RUN_PHASE2=0)"
    run_summary_record_step "E1" "Wave E1" "planned" "0"
    return 0
  fi
  ( cd "$SMOM" && RUN_PHASE2=0 ./scripts/deployment/run-pmm-full-parity-all-phases.sh 2>&1 | tee "$LOG_DIR/e1-pmm-phase1.log" ) || true
  run_summary_record_step "E1" "Wave E1" "success" "$((SECONDS - step_started))"
}

# ----- E2: Flow B — CCIP config + fund bridges (per-chain parallel) -----
run_e2() {
  run_wave "E2" || return 0
  if [[ ! -d "$SMOM" ]]; then log "Skip E2: smom-dbis-138 not found"; return 0; fi
  local step_started=$SECONDS
  if [[ ! -d "$SMOM" ]]; then
    log "Skip E2: smom-dbis-138 not found"
    run_summary_record_step "E2" "Wave E2" "skipped" "0"
    return 0
  fi
  if [[ -n "$DRY_RUN" ]]; then
    log "[DRY RUN] Would run: complete-config-ready-chains.sh, fund-ccip-bridges-with-link.sh"
    run_summary_record_step "E2" "Wave E2" "planned" "0"
    return 0
  fi
  ( cd "$SMOM" && ./scripts/deployment/complete-config-ready-chains.sh 2>&1 | tee "$LOG_DIR/e2-config.log" ) || true
  ( cd "$SMOM" && ./scripts/deployment/fund-ccip-bridges-with-link.sh 2>&1 | tee "$LOG_DIR/e2-fund.log" ) || true
  run_summary_record_step "E2" "Wave E2" "success" "$((SECONDS - step_started))"
}

# ----- E3: Code/config — token-aggregation env, bridge routes, token list -----
run_e3() {
  run_wave "E3" || return 0
  local step_started=$SECONDS
  log "E3: Ensure .env has CHAIN_138_DODO_PMM_INTEGRATION, BRIDGE_REGISTRY_ADDRESS (see smom-dbis-138/env.additions.example)."
  if [[ -f "$SMOM/.env" ]]; then
    if grep -q "CHAIN_138_DODO_PMM_INTEGRATION" "$SMOM/.env" 2>/dev/null; then
@@ -82,52 +146,80 @@ run_e3() {
      log " Add BRIDGE_REGISTRY_ADDRESS for Flow C quote API (orchestration)"
    fi
  fi
  if [[ -n "$DRY_RUN" ]]; then return 0; fi
  if [[ -n "$DRY_RUN" ]]; then
    run_summary_record_step "E3" "Wave E3" "planned" "0"
    return 0
  fi
  # Validation that can run from anywhere
  if [[ -f "$REPO_ROOT/scripts/validation/validate-config-files.sh" ]]; then
    ( "$REPO_ROOT/scripts/validation/validate-config-files.sh" 2>&1 | tee "$LOG_DIR/e3-validate.log" ) || true
  fi
  run_summary_record_step "E3" "Wave E3" "success" "$((SECONDS - step_started))"
}

# ----- E4: Infra/verify (operator/LAN) -----
run_e4() {
  run_wave "E4" || return 0
  local step_started=$SECONDS
  log "E4 (Operator/LAN): X2 Blockscout verify, X3 E2E routing, X4 Explorer E2E, X5 token-aggregation health."
  log " run-contract-verification-with-proxy.sh, verify-end-to-end-routing.sh, explorer e2e-test-explorer.sh"
  if [[ -n "$DRY_RUN" ]]; then return 0; fi
  if [[ -n "$DRY_RUN" ]]; then
    run_summary_record_step "E4" "Wave E4" "planned" "0"
    return 0
  fi
  if [[ -f "$REPO_ROOT/scripts/verify/verify-end-to-end-routing.sh" ]]; then
    ( "$REPO_ROOT/scripts/verify/verify-end-to-end-routing.sh" 2>&1 | tee "$LOG_DIR/e4-routing.log" ) || true
  fi
  run_summary_record_step "E4" "Wave E4" "success" "$((SECONDS - step_started))"
}

# ----- E5: Multichain — PMM Phase 2 (parallel per chain) -----
run_e5() {
  run_wave "E5" || return 0
  if [[ ! -d "$SMOM" ]]; then log "Skip E5: smom-dbis-138 not found"; return 0; fi
  local step_started=$SECONDS
  if [[ ! -d "$SMOM" ]]; then
    log "Skip E5: smom-dbis-138 not found"
    run_summary_record_step "E5" "Wave E5" "skipped" "0"
    return 0
  fi
  if [[ -n "$DRY_RUN" ]]; then
    log "[DRY RUN] Would run: run-pmm-full-parity-all-phases.sh (RUN_PHASE1=0) for L2s"
    run_summary_record_step "E5" "Wave E5" "planned" "0"
    return 0
  fi
  ( cd "$SMOM" && RUN_PHASE1=0 ./scripts/deployment/run-pmm-full-parity-all-phases.sh 2>&1 | tee "$LOG_DIR/e5-pmm-phase2.log" ) || true
  run_summary_record_step "E5" "Wave E5" "success" "$((SECONDS - step_started))"
}

# ----- E6: Frontend + test (code/operator) -----
run_e6() {
  run_wave "E6" || return 0
  local step_started=$SECONDS
  log "E6: B6 Bridge UI to routes+token mapping; B7 test 138↔dest; C5–C7 destination DEX, full path quote UI, E2E test."
  log " See TASKS_TO_INCREASE_ALL_E2E_FLOWS.md Flow B/C."
  if [[ -n "$DRY_RUN" ]]; then
    run_summary_record_step "E6" "Wave E6" "planned" "0"
    return 0
  fi
  run_summary_record_step "E6" "Wave E6" "success" "$((SECONDS - step_started))"
}

# ----- E7: Docs -----
run_e7() {
  run_wave "E7" || return 0
  local step_started=$SECONDS
  log "E7: Update docs/11-references/PMM_DEX_ROUTING_STATUS.md when pools/liquidity live; runbooks in DEX_AND_AGGREGATORS_CHAIN138_EXPLAINER, CONFIG_READY_CHAINS."
  if [[ -n "$DRY_RUN" ]]; then return 0; fi
  if [[ -n "$DRY_RUN" ]]; then
    run_summary_record_step "E7" "Wave E7" "planned" "0"
    return 0
  fi
  # No automated doc edit; operator updates when state changes
  run_summary_record_step "E7" "Wave E7" "success" "$((SECONDS - step_started))"
}

# ----- Run waves (E1+E2+E3 can run in parallel; E5 after E1 if both run) -----
log "E2E Full Parallel — DRY_RUN=$DRY_RUN WAVE_FILTER=$WAVE_FILTER Logs: $LOG_DIR"
log "Started (UTC): $RUN_STARTED_AT"
run_e0
# E1 and E2 are independent; E3 is config check — run E1 and E2 in parallel when not dry-run
if [[ -z "$DRY_RUN" && -z "$WAVE_FILTER" ]]; then
@@ -146,3 +238,6 @@ run_e5
run_e6
run_e7
log "Done. Logs in $LOG_DIR"
log "Total elapsed: $(format_duration "$SECONDS")"
RUN_MODE=$([[ -n "$DRY_RUN" ]] && echo "dry-run" || echo "run")
RUN_STATUS="success"
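The wave filter and dry-run flags compose as parsed above; a sketch of scoping a run to a single wave and then listing what was filtered out of the summary:

  ./scripts/run-e2e-flow-tasks-full-parallel.sh --dry-run
  ./scripts/run-e2e-flow-tasks-full-parallel.sh --wave E3 \
    --json-out reports/status/run-e2e-flow-tasks-latest.json
  jq -r '.steps[] | select(.status == "filtered") | .step' \
    reports/status/run-e2e-flow-tasks-latest.json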
@@ -358,6 +358,7 @@ main() {
    "dbis-api"    # dbis-api.d-bis.org
    "dbis-api-2"  # dbis-api-2.d-bis.org
    "secure"      # secure.d-bis.org
    "mail"        # mail.d-bis.org (A → PUBLIC_IP; use with mail MX/SPF script)
  )
  if ! process_zone "$ZONE_D_BIS_ORG" "d-bis.org" "${DBIS_RECORDS[@]}"; then
    ((total_failures++))
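After the record is pushed, a quick external check is possible (a sketch; dig assumed available — if the mail record is created unproxied, as a mail host normally must be, the answer should be the origin PUBLIC_IP rather than a Cloudflare edge address):

  dig +short A mail.d-bis.org @1.1.1.1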
@@ -90,15 +90,17 @@ log_info "Step 1: Backing up NPMplus database..."
DB_BACKUP_DIR="$BACKUP_DIR/database"
mkdir -p "$DB_BACKUP_DIR"

# Discover database.sqlite (path differs across images / mount layouts)
# Discover database.sqlite (path differs: legacy NPM /data/* vs zoeyvid NPMplus
# on /opt/npmplus with live DB at /opt/npmplus/npmplus/database.sqlite → /data/npmplus/ in Docker)
NPM_DB_PATH=$(
  npm_lxc_ssh "pct exec $NPMPLUS_VMID -- sh -c '
    for p in /data/database.sqlite /data/database/database.sqlite; do
      [ -f \"\$p\" ] && { echo \"\$p\"; exit 0; }
    for p in /opt/npmplus/npmplus/database.sqlite /opt/npmplus/database.sqlite /data/npmplus/database.sqlite /data/database.sqlite /data/database/database.sqlite; do
      if [ -f \"\$p\" ] && [ -s \"\$p\" ]; then echo \"\$p\"; exit 0; fi
    done
    f=\$(find /data -maxdepth 6 -name database.sqlite 2>/dev/null | head -1)
    f=\"\"
    f=\$(find /opt/npmplus /data -maxdepth 8 -name database.sqlite -size +0c 2>/dev/null | head -1)
    if [ -n \"\$f\" ] && [ -f \"\$f\" ]; then echo \"\$f\"; else echo \"\"; fi
  '" 2>/dev/null | tr -d '\r' || true
  '" 2>/dev/null | tr -d '\r' | head -1 || true
)
NPM_DB_PATH="${NPM_DB_PATH//$'\n'/}"
if [ -n "$NPM_DB_PATH" ]; then
@@ -106,14 +108,25 @@ if [ -n "$NPM_DB_PATH" ]; then
else
  log_info " No database.sqlite at common paths; dump may be skipped (check container / mounts)"
fi
# Method 1: SQL dump
# Method 1: SQL dump (sqlite3 if present, else Python sqlite3 in the LXC — zoeyvid Docker image has no sqlite3)
log_info " Creating SQL dump..."
NPM_DB_PQ=""
NPM_PATH_QUOTED=""
[ -n "$NPM_DB_PATH" ] && NPM_DB_PQ=$(printf %q "$NPM_DB_PATH")
[ -n "$NPM_DB_PATH" ] && NPM_PATH_QUOTED=$(printf %q "$NPM_DB_PATH")
if [ -n "$NPM_DB_PATH" ]; then
  npm_lxc_ssh "pct exec $NPMPLUS_VMID -- sh -c \"if command -v sqlite3 >/dev/null 2>&1; then sqlite3 $NPM_DB_PQ .dump; else echo _NO_SQLITE3; fi\"" \
    > "$DB_BACKUP_DIR/database.sql" 2>/dev/null || : > "$DB_BACKUP_DIR/database.sql"
  if grep -qxF '_NO_SQLITE3' "$DB_BACKUP_DIR/database.sql" 2>/dev/null; then
  if ! npm_lxc_ssh "pct exec $NPMPLUS_VMID -- sh -c \"
    if command -v sqlite3 >/dev/null 2>&1; then
      sqlite3 $NPM_DB_PQ .dump
    elif command -v python3 >/dev/null 2>&1; then
      python3 -c 'import sqlite3,sys; [print(x) for x in sqlite3.connect(sys.argv[1]).iterdump()]' $NPM_PATH_QUOTED
    else
      echo _NO_DUMP_TOOL
    fi
  \"" > "$DB_BACKUP_DIR/database.sql" 2>/dev/null; then
    : > "$DB_BACKUP_DIR/database.sql"
  fi
  if grep -qxF '_NO_DUMP_TOOL' "$DB_BACKUP_DIR/database.sql" 2>/dev/null; then
    : > "$DB_BACKUP_DIR/database.sql"
  fi
else
@@ -139,7 +152,7 @@ fi
if ( [ -s "$DB_BACKUP_DIR/database.sql" ] && grep -qiE 'CREATE|INSERT|PRAGMA' "$DB_BACKUP_DIR/database.sql" 2>/dev/null ) || [ -s "$DB_BACKUP_DIR/database.sqlite" ]; then
  log_success " Database backup completed"
else
  log_warn " Database backup empty — LXC not reachable, DB path changed, or sqlite3 missing in container"
  log_warn " Database backup empty — LXC not reachable, or DB not under /opt/npmplus/**/database.sqlite (see script)"
fi

# Step 2: Export Proxy Hosts via API
@@ -191,7 +204,8 @@ CERT_BACKUP_DIR="$BACKUP_DIR/certificates"
mkdir -p "$CERT_BACKUP_DIR"

CERT_LIVE_BASE=""
for _try in /data/tls/certbot/live /etc/letsencrypt/live /data/letsencrypt/live; do
# zoeyvid NPMplus: live certs under /opt/npmplus/tls/certbot/live; /etc/letsencrypt/live may include non-PEM "README"
for _try in /opt/npmplus/tls/certbot/live /data/tls/certbot/live /etc/letsencrypt/live /data/letsencrypt/live; do
  if npm_lxc_ssh "pct exec $NPMPLUS_VMID -- test -d '$_try'" 2>/dev/null; then
    CERT_LIVE_BASE="$_try"
    break
@@ -213,7 +227,11 @@ fi
if [ -s "$CERT_BACKUP_DIR/cert_list.txt" ]; then
  log_info " Copying certificate files..."
  while IFS= read -r cert_dir; do
    if [ -n "$cert_dir" ] && [ "$cert_dir" != "lost+found" ]; then
    if [ -n "$cert_dir" ] && [ "$cert_dir" != "lost+found" ] && [ "$cert_dir" != "README" ]; then
      if ! npm_lxc_ssh "pct exec $NPMPLUS_VMID -- test -f $CERT_LIVE_BASE/$cert_dir/fullchain.pem" 2>/dev/null; then
        log_info " Skipping $cert_dir (no fullchain.pem — not a cert directory)"
        continue
      fi
      mkdir -p "$CERT_BACKUP_DIR/$cert_dir"
      npm_lxc_ssh "pct exec $NPMPLUS_VMID -- cat $CERT_LIVE_BASE/$cert_dir/fullchain.pem" > "$CERT_BACKUP_DIR/$cert_dir/fullchain.pem" 2>/dev/null || {
        log_warn " Failed to copy fullchain.pem for $cert_dir"
@@ -257,6 +275,8 @@ cat > "$BACKUP_DIR/manifest.json" <<EOF
  "npmplus_vmid": "$NPMPLUS_VMID",
  "npmplus_host": "$NPMPLUS_HOST",
  "npm_url": "$NPM_URL",
  "database_path_resolved": "${NPM_DB_PATH:-}",
  "cert_live_base": "${CERT_LIVE_BASE:-}",
  "backup_contents": {
    "database": {
      "sql_dump": "$([ -s "$DB_BACKUP_DIR/database.sql" ] && echo "present" || echo "missing")",
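If a restore from the captured SQL dump is ever needed, the same tool split applies inside the container; a sketch, to be run against a stopped NPMplus and an empty target file:

  sqlite3 /opt/npmplus/npmplus/database.sqlite < database.sql
  # or, when the image has no sqlite3 binary:
  python3 -c 'import sqlite3,sys; sqlite3.connect(sys.argv[1]).executescript(open(sys.argv[2]).read())' \
    /opt/npmplus/npmplus/database.sqlite database.sql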