Compare commits

..

2 Commits

Author SHA1 Message Date
f2e0434ad6 PR AB: complete Phoenix deployment scaffolding (add 3 files referenced by main 4a1f69a) (#32)
Some checks failed
Deploy to Phoenix / deploy (push) Failing after 7s
Adds webapp-nginx.conf, systemd/currencicombo-orchestrator.service, and install-prune-cron.sh — all three referenced by main's existing install.sh / deploy script / webapp.service / README but missing from the 4a1f69a commit. Byte-identical to PR #31 branch ded7d24.

Closes gap so CT 8604 can boot cleanly.
2026-04-23 04:39:36 +00:00
defiQUG
4a1f69a8e5 deploy: make Phoenix redeploys archive-safe
Some checks failed
Deploy to Phoenix / deploy (push) Failing after 5s
phoenix-deploy Deploy failed: Command failed: bash scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh [currencicombo-phoenix] packing s
2026-04-22 20:05:35 -07:00
20 changed files with 1195 additions and 533 deletions

View File

@@ -1,20 +0,0 @@
**/node_modules
**/.git
**/.github
**/dist
**/build
**/.vscode
**/.idea
**/.DS_Store
**/.env
**/.env.local
**/.env.*.local
**/coverage
**/*.log
**/npm-debug.log*
orchestrator/dist
orchestrator/coverage
contracts/cache
contracts/artifacts
terraform
k8s

View File

@@ -1,55 +0,0 @@
# CurrenciCombo sandbox env — copy to `.env.sandbox` and edit.
#
# cp .env.sandbox.example .env.sandbox
# docker compose --env-file .env.sandbox up -d
#
# `EVENT_SIGNING_SECRET` and `ORCHESTRATOR_API_KEYS` are REQUIRED —
# orchestrator will refuse to boot without them (see PR I boot-time
# env assertions in orchestrator/src/config/env.ts).
# ---- Postgres ----
POSTGRES_DB=currencicombo
POSTGRES_USER=currencicombo
POSTGRES_PASSWORD=currencicombo
POSTGRES_PORT=5432
# ---- Redis ----
REDIS_PORT=6379
# ---- Orchestrator ----
ORCHESTRATOR_PORT=8080
# 32+ random bytes, hex-encoded. Generate with:
# openssl rand -hex 32
EVENT_SIGNING_SECRET=change-me-to-openssl-rand-hex-32
# Comma-separated `key:role` pairs; role ∈ {initiator, settler, auditor}
# Generate a key with:
# openssl rand -hex 16
ORCHESTRATOR_API_KEYS=local-demo-key:initiator,local-settler-key:settler,local-auditor-key:auditor
# ---- Chain 138 (EXT-CHAIN138-CI-RPC resolved by default) ----
CHAIN_138_RPC_URL=https://rpc.public-0138.defi-oracle.io
# Published by `contracts/scripts/deploy-notary-registry.ts` once you
# deploy NotaryRegistry.sol. Leave blank to run in mock-anchor mode.
NOTARY_REGISTRY_ADDRESS=
# Funded signer for on-chain anchors. Leave blank to run in mock-anchor
# mode (orchestrator logs "[NotaryChain] mock anchor — reason: notary
# envs not set" when unset).
ORCHESTRATOR_PRIVATE_KEY=
# ---- External blockers (leave blank to run in sandbox/mock mode) ----
# EXT-DBIS-CORE — flip when dbis_core is deployed
DBIS_CORE_URL=
# EXT-FIN-GATEWAY — flip when real FIN / Alliance Access gateway is provisioned
FIN_SANDBOX_URL=
# cc-identity-core HTTP base URL
CC_IDENTITY_URL=
# cc-compliance-controls matrix JSON URL (optional — embedded v0 is used if blank)
CC_CONTROLS_MATRIX_URL=
# ---- Portal (Vite) ----
PORTAL_PORT=3000
# Baked into the portal bundle at build time. Must be the URL the
# browser uses to reach the orchestrator (usually localhost + the
# published ORCHESTRATOR_PORT). Leave blank to run the portal in its
# built-in demo-fallback mode.
VITE_ORCHESTRATOR_URL=http://localhost:8080

View File

@@ -0,0 +1,22 @@
name: Deploy to Phoenix
on:
push:
branches: [main, master]
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Trigger Phoenix deployment
run: |
SHA="$(git rev-parse HEAD)"
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
curl -sSf -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
-H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
-H "Content-Type: application/json" \
-d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"default\"}"

1
.gitignore vendored
View File

@@ -78,4 +78,3 @@ pnpm-lock.yaml
# Misc
*.pem
*.key
.env.sandbox

View File

@@ -1,45 +1,39 @@
# Multi-stage build for the CurrenciCombo portal (Vite + React).
#
# Context MUST be the repo root so the vite build can see src/, public/,
# index.html, etc.:
#
# docker build -t currencicombo/portal:local .
#
# VITE_ORCHESTRATOR_URL is baked at build time (Vite inlines env vars
# prefixed with VITE_). In a sandbox compose, set it to whatever URL
# the browser uses to reach the orchestrator — typically
# http://localhost:8080 if the orchestrator's port is published on the
# host. When unset, the portal runs in its built-in demo-fallback mode
# (see src/services/orchestrator.ts).
# Multi-stage Dockerfile for orchestrator service
FROM node:18-alpine AS builder
# ------- build stage -------
FROM node:20-alpine AS build
WORKDIR /app
ARG VITE_ORCHESTRATOR_URL=""
ENV VITE_ORCHESTRATOR_URL=${VITE_ORCHESTRATOR_URL}
# Copy package files
COPY orchestrator/package*.json ./
RUN npm ci
COPY package.json package-lock.json ./
# vite 7 ships @rolldown/binding-* as platform-matched optional deps,
# so we MUST include optional deps (skipping them breaks `vite build`
# with "Cannot find native binding"). `fsevents` is also optional but
# darwin-only; on linux npm 10 trips EBADPLATFORM on the lockfile
# entry even though the runtime would never load it. `--force` downgrades
# that EBADPLATFORM to a warning while still installing the rolldown
# binding for the current platform.
RUN npm install --include=optional --force --no-audit --no-fund --ignore-scripts
COPY tsconfig.json tsconfig.app.json tsconfig.node.json vite.config.ts index.html eslint.config.js ./
COPY public ./public
COPY src ./src
# Copy source
COPY orchestrator/ ./
# Build
RUN npm run build
# ------- runtime stage -------
FROM nginx:1.27-alpine AS runtime
COPY nginx.conf /etc/nginx/conf.d/default.conf
COPY --from=build /app/dist /usr/share/nginx/html
EXPOSE 80
# Production stage
FROM node:18-alpine
WORKDIR /app
# Copy package files
COPY orchestrator/package*.json ./
# Install production dependencies only
RUN npm ci --only=production
# Copy built files
COPY --from=builder /app/dist ./dist
# Expose port
EXPOSE 8080
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD node -e "require('http').get('http://localhost:8080/health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})"
# Start application
CMD ["node", "dist/index.js"]
HEALTHCHECK --interval=30s --timeout=5s --start-period=5s --retries=3 \
CMD wget -q --spider http://127.0.0.1/ || exit 1

View File

@@ -15,18 +15,6 @@ const config: HardhatUserConfig = {
hardhat: {
chainId: 1337,
},
// Public Chain 138 RPC — resolves proxmox blocker EXT-CHAIN138-CI-RPC.
// Deployer key is only read when a tx is actually sent (e.g. via
// `npx hardhat --network chain138 run scripts/deploy-notary-registry.ts`);
// leaving NOTARY_DEPLOYER_PRIVATE_KEY unset is safe for read-only
// flows like `hardhat console --network chain138`.
chain138: {
url: process.env.NOTARY_RPC_URL || "https://rpc.public-0138.defi-oracle.io",
chainId: 138,
accounts: process.env.NOTARY_DEPLOYER_PRIVATE_KEY
? [process.env.NOTARY_DEPLOYER_PRIVATE_KEY]
: [],
},
},
paths: {
sources: "./",

View File

@@ -1,243 +0,0 @@
/**
* Dedicated NotaryRegistry deploy script.
*
* Self-compiles NotaryRegistry.sol + its two interfaces + the OpenZeppelin
* Ownable dependency via solc-js in-process, so it does NOT depend on
* `hardhat compile` (hardhat's source-glob picks up node_modules under
* contracts/ and trips HH1006 on this repo — see E2E helper
* orchestrator/tests/e2e/helpers/compileNotaryRegistry.ts for the same
* trick).
*
* Environment inputs (all read from `process.env`, no CLI args):
*
* NOTARY_RPC_URL RPC endpoint (required unless NOTARY_DRY_RUN=1)
* NOTARY_DEPLOYER_PRIVATE_KEY Hex-encoded funded deployer key (required unless NOTARY_DRY_RUN=1)
* NOTARY_INITIAL_OWNER Address that receives ownership (defaults to deployer)
* NOTARY_DRY_RUN "1" to compile + print calldata shape + skip sending
*
* Usage:
*
* # From contracts/:
* NOTARY_RPC_URL=https://rpc.public-0138.defi-oracle.io \
* NOTARY_DEPLOYER_PRIVATE_KEY=0x... \
* npx ts-node scripts/deploy-notary-registry.ts
*
* # Dry run (no RPC contact, no key required — CI smoke test):
* NOTARY_DRY_RUN=1 npx ts-node scripts/deploy-notary-registry.ts
*
* The script prints a machine-readable JSON envelope as its LAST line so
* callers (Makefile, CI, scripts piping into .env.sandbox) can grep the
* address out:
*
* {"contract":"NotaryRegistry","address":"0x...","txHash":"0x...","chainId":138}
*/
import { readFileSync } from "node:fs";
import { dirname, join, resolve } from "node:path";
import { ContractFactory, JsonRpcProvider, Wallet, isAddress } from "ethers";
// eslint-disable-next-line @typescript-eslint/no-require-imports, @typescript-eslint/no-var-requires
const solc = require("solc");
const CONTRACTS_ROOT = resolve(__dirname, "..");
const OZ_ROOT = join(CONTRACTS_ROOT, "node_modules", "@openzeppelin");
type AbiFragment = Record<string, unknown>;
interface CompiledArtifact {
abi: AbiFragment[];
bytecode: string;
}
interface SolcSource {
content: string;
}
interface SolcInput {
language: "Solidity";
sources: Record<string, SolcSource>;
settings: {
optimizer: { enabled: true; runs: number };
outputSelection: Record<string, Record<string, string[]>>;
};
}
interface SolcOutput {
errors?: Array<{ severity: "error" | "warning"; formattedMessage: string }>;
contracts: Record<
string,
Record<string, { abi: AbiFragment[]; evm: { bytecode: { object: string } } }>
>;
}
/**
 * solc-js import callback. Resolves "@openzeppelin/…" imports from the
 * local node_modules copy under contracts/, and every other path relative
 * to the contracts/ root. Returns { contents } on success or { error }
 * with a readable message on failure (solc surfaces it in its own errors).
 */
function findImports(requestedPath: string): { contents: string } | { error: string } {
  const isOzImport = requestedPath.startsWith("@openzeppelin/");
  const absolutePath = isOzImport
    ? join(OZ_ROOT, requestedPath.replace("@openzeppelin/", ""))
    : join(CONTRACTS_ROOT, requestedPath);
  try {
    return { contents: readFileSync(absolutePath, "utf8") };
  } catch (e) {
    const message = (e as Error).message;
    // OZ failures get a prefixed message naming the requested import;
    // plain contract-relative failures pass the fs error through as-is.
    return {
      error: isOzImport ? `Could not read ${requestedPath}: ${message}` : message,
    };
  }
}
/**
 * Walks the Solidity import graph starting at `entryPath` and returns the
 * solc-style sources map (source path → { content }) for every reachable
 * file. The entry file's content is read directly from
 * contracts/NotaryRegistry.sol; all other paths go through findImports
 * (which also resolves the "@openzeppelin/" prefix). Throws on the first
 * import that cannot be read.
 */
function collectSources(entryPath: string): Record<string, SolcSource> {
  const sources: Record<string, SolcSource> = {};
  // Iterative DFS over `import` statements; `seen` prevents re-reading a
  // file that is imported from more than one place.
  const stack: string[] = [entryPath];
  const seen = new Set<string>();
  while (stack.length > 0) {
    const cur = stack.pop()!;
    if (seen.has(cur)) continue;
    seen.add(cur);
    let content: string;
    if (cur === entryPath) {
      content = readFileSync(join(CONTRACTS_ROOT, "NotaryRegistry.sol"), "utf8");
    } else {
      const resolved = findImports(cur);
      if ("error" in resolved) {
        throw new Error(`Unresolved import: ${cur} (${resolved.error})`);
      }
      content = resolved.contents;
    }
    sources[cur] = { content };
    // Matches both plain and named import forms, one per line:
    //   import "x";   and   import { A, B } from "x";
    const importRe = /^\s*import\s+(?:\{[^}]+\}\s+from\s+)?"([^"]+)";/gm;
    let m: RegExpExecArray | null;
    while ((m = importRe.exec(content)) !== null) {
      const rawImport = m[1];
      let normalised: string;
      if (rawImport.startsWith("@openzeppelin/")) {
        // Package imports are kept verbatim — findImports maps them onto
        // node_modules/@openzeppelin at read time.
        normalised = rawImport;
      } else if (rawImport.startsWith("./") || rawImport.startsWith("../")) {
        // Relative imports are rebased onto the importing file's directory;
        // join() collapses "./" and "../" segments, then a leading "./" (if
        // any remains) is stripped so map keys stay in one canonical form.
        const curDir = cur.includes("/") ? dirname(cur) : ".";
        const joined = join(curDir, rawImport);
        normalised = joined.startsWith(".") ? joined.slice(2) : joined;
      } else {
        normalised = rawImport;
      }
      if (!seen.has(normalised)) stack.push(normalised);
    }
  }
  return sources;
}
/**
 * Compiles NotaryRegistry.sol (plus its transitive imports, gathered by
 * collectSources) in-process with solc-js, and returns the contract's ABI
 * together with deployable "0x"-prefixed bytecode. Throws if solc reports
 * any error-severity diagnostic or fails to emit the artifact.
 */
function compileNotaryRegistry(): CompiledArtifact {
  const entryFile = "NotaryRegistry.sol";
  const compilerInput: SolcInput = {
    language: "Solidity",
    sources: collectSources(entryFile),
    settings: {
      optimizer: { enabled: true, runs: 200 },
      outputSelection: { "*": { "*": ["abi", "evm.bytecode.object"] } },
    },
  };
  const rawOutput = solc.compile(JSON.stringify(compilerInput), { import: findImports });
  const result: SolcOutput = JSON.parse(rawOutput);
  // Warnings are tolerated; only error-severity diagnostics abort.
  const compileErrors = (result.errors ?? []).filter((e) => e.severity === "error");
  if (compileErrors.length > 0) {
    const details = compileErrors.map((e) => e.formattedMessage).join("\n");
    throw new Error(`[deploy-notary-registry] solc compile failed:\n${details}`);
  }
  const emitted = result.contracts[entryFile]?.NotaryRegistry;
  if (!emitted) {
    throw new Error(
      "[deploy-notary-registry] solc did not emit NotaryRegistry artifact",
    );
  }
  return {
    abi: emitted.abi,
    bytecode: "0x" + emitted.evm.bytecode.object,
  };
}
/**
 * Reads a required environment variable. Throws with a script-prefixed
 * message when the variable is unset or empty.
 */
function require1(name: string): string {
  const value = process.env[name];
  if (value) return value;
  throw new Error(`[deploy-notary-registry] ${name} is required`);
}
/**
 * Entry point. Compiles the contract in-process, then either:
 *  - NOTARY_DRY_RUN=1: prints a JSON envelope describing the would-be
 *    deployment (bytecode/calldata sizes, ABI entry count) without
 *    contacting any RPC or requiring a key, or
 *  - otherwise: deploys NotaryRegistry over NOTARY_RPC_URL signed by
 *    NOTARY_DEPLOYER_PRIVATE_KEY and prints the address envelope.
 *
 * Human-readable progress goes to stderr; the machine-readable JSON
 * envelope is the LAST line on stdout (callers grep it out — see the
 * file header).
 */
async function main(): Promise<void> {
  const dryRun = process.env.NOTARY_DRY_RUN === "1";
  const artifact = compileNotaryRegistry();
  if (dryRun) {
    // Dry run uses a placeholder owner address when none is supplied.
    const initialOwner =
      process.env.NOTARY_INITIAL_OWNER ||
      "0x0000000000000000000000000000000000000001";
    if (!isAddress(initialOwner)) {
      throw new Error(
        `[deploy-notary-registry] NOTARY_INITIAL_OWNER is not a valid address: ${initialOwner}`,
      );
    }
    // A signer-less factory is enough to shape the deploy transaction.
    const factory = new ContractFactory(artifact.abi, artifact.bytecode);
    const deployTx = await factory.getDeployTransaction(initialOwner);
    const envelope = {
      contract: "NotaryRegistry",
      dryRun: true,
      initialOwner,
      bytecodeLength: artifact.bytecode.length,
      calldataLength: (deployTx.data as string).length,
      abiEntryCount: artifact.abi.length,
    };
    console.log(JSON.stringify(envelope));
    return;
  }
  const rpcUrl = require1("NOTARY_RPC_URL");
  const pk = require1("NOTARY_DEPLOYER_PRIVATE_KEY");
  // staticNetwork + cacheTimeout:-1 keep ethers from re-querying the
  // network — NOTE(review): presumably to avoid redundant chainId round
  // trips against a public RPC; confirm intent.
  const provider = new JsonRpcProvider(rpcUrl, undefined, {
    staticNetwork: true,
    cacheTimeout: -1,
  });
  const wallet = new Wallet(pk, provider);
  const deployerAddr = await wallet.getAddress();
  // Ownership defaults to the deployer unless explicitly overridden.
  const initialOwner = process.env.NOTARY_INITIAL_OWNER || deployerAddr;
  if (!isAddress(initialOwner)) {
    throw new Error(
      `[deploy-notary-registry] NOTARY_INITIAL_OWNER is not a valid address: ${initialOwner}`,
    );
  }
  const net = await provider.getNetwork();
  const bal = await provider.getBalance(deployerAddr);
  // Progress on stderr so stdout stays a clean JSON channel.
  console.error(
    `[deploy-notary-registry] deployer=${deployerAddr} chainId=${net.chainId} balance=${bal} initialOwner=${initialOwner}`,
  );
  if (bal === BigInt(0)) {
    // Fail fast with an actionable message instead of an opaque RPC gas error.
    throw new Error(
      `[deploy-notary-registry] deployer ${deployerAddr} has zero balance on chainId=${net.chainId}. Fund the account before deploying.`,
    );
  }
  const factory = new ContractFactory(artifact.abi, artifact.bytecode, wallet);
  const contract = await factory.deploy(initialOwner);
  const receipt = await contract.deploymentTransaction()?.wait();
  const address = await contract.getAddress();
  const envelope = {
    contract: "NotaryRegistry",
    address,
    txHash: receipt?.hash,
    chainId: Number(net.chainId),
    initialOwner,
  };
  console.log(JSON.stringify(envelope));
}
// Top-level runner: any failure prints the error and exits non-zero so CI
// and calling shell scripts can detect the failed deploy.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});

View File

@@ -1,44 +1,28 @@
# CurrenciCombo sandbox stack — orchestrator + portal + Postgres + Redis.
#
# Usage:
#
# cp .env.sandbox.example .env.sandbox
# # edit .env.sandbox as needed
# docker compose --env-file .env.sandbox up -d
# curl http://localhost:${ORCHESTRATOR_PORT:-8080}/health
# curl http://localhost:${ORCHESTRATOR_PORT:-8080}/ready
# open http://localhost:${PORTAL_PORT:-3000}/
#
# External blockers from proxmox/scripts/verify/check-external-dependencies.sh
# surface in the orchestrator's boot-time log summary (see PR Y). Leaving
# DBIS_CORE_URL / FIN_SANDBOX_URL / CC_IDENTITY_URL unset is expected in
# the sandbox — the services fall back to deterministic mocks and tag
# the EXT-* blocker id in every log line.
#
# EXT-CHAIN138-CI-RPC is resolved out of the box: CHAIN_138_RPC_URL
# defaults to the public endpoint at https://rpc.public-0138.defi-oracle.io.
version: '3.8'
services:
# PostgreSQL database
postgres:
image: postgres:15-alpine
environment:
POSTGRES_DB: ${POSTGRES_DB:-currencicombo}
POSTGRES_USER: ${POSTGRES_USER:-currencicombo}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-currencicombo}
POSTGRES_DB: comboflow
POSTGRES_USER: comboflow
POSTGRES_PASSWORD: comboflow
ports:
- "${POSTGRES_PORT:-5432}:5432"
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-currencicombo} -d ${POSTGRES_DB:-currencicombo}"]
test: ["CMD-SHELL", "pg_isready -U comboflow"]
interval: 10s
timeout: 5s
retries: 5
# Redis cache
redis:
image: redis:7-alpine
ports:
- "${REDIS_PORT:-6379}:6379"
- "6379:6379"
volumes:
- redis_data:/data
healthcheck:
@@ -47,56 +31,43 @@ services:
timeout: 3s
retries: 5
# Orchestrator service
orchestrator:
build:
context: ./orchestrator
context: .
dockerfile: Dockerfile
image: currencicombo/orchestrator:local
ports:
- "${ORCHESTRATOR_PORT:-8080}:8080"
- "8080:8080"
environment:
NODE_ENV: production
PORT: "8080"
DATABASE_URL: postgresql://${POSTGRES_USER:-currencicombo}:${POSTGRES_PASSWORD:-currencicombo}@postgres:5432/${POSTGRES_DB:-currencicombo}
PORT: 8080
DATABASE_URL: postgresql://comboflow:comboflow@postgres:5432/comboflow
REDIS_URL: redis://redis:6379
# --- required for signed events (PR O) ---
EVENT_SIGNING_SECRET: ${EVENT_SIGNING_SECRET}
# --- API keys (PR M) — comma-separated key:role pairs ---
ORCHESTRATOR_API_KEYS: ${ORCHESTRATOR_API_KEYS}
# --- Chain 138 (EXT-CHAIN138-CI-RPC — resolved) ---
CHAIN_138_RPC_URL: ${CHAIN_138_RPC_URL:-https://rpc.public-0138.defi-oracle.io}
NOTARY_REGISTRY_ADDRESS: ${NOTARY_REGISTRY_ADDRESS:-}
ORCHESTRATOR_PRIVATE_KEY: ${ORCHESTRATOR_PRIVATE_KEY:-}
# --- External blockers (intentionally unset in sandbox) ---
DBIS_CORE_URL: ${DBIS_CORE_URL:-}
FIN_SANDBOX_URL: ${FIN_SANDBOX_URL:-}
CC_IDENTITY_URL: ${CC_IDENTITY_URL:-}
CC_CONTROLS_MATRIX_URL: ${CC_CONTROLS_MATRIX_URL:-}
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
healthcheck:
test: ["CMD", "node", "-e", "require('http').get('http://127.0.0.1:8080/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"]
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 15s
portal:
# Frontend
webapp:
build:
context: .
context: ./webapp
dockerfile: Dockerfile
args:
VITE_ORCHESTRATOR_URL: ${VITE_ORCHESTRATOR_URL:-http://localhost:8080}
image: currencicombo/portal:local
ports:
- "${PORTAL_PORT:-3000}:80"
- "3000:3000"
environment:
NODE_ENV: production
NEXT_PUBLIC_ORCH_URL: http://orchestrator:8080
depends_on:
orchestrator:
condition: service_healthy
- orchestrator
volumes:
postgres_data:
redis_data:

View File

@@ -1,28 +0,0 @@
# nginx vhost for the portal container — serves the static vite build
# (copied to /usr/share/nginx/html in the image's runtime stage).
server {
    listen 80;
    # Catch-all server name: this is the only vhost in the container.
    server_name _;
    # Static SPA — vite build output lives here.
    root /usr/share/nginx/html;
    index index.html;
    # Long-cache hashed assets produced by vite's rollup chunks.
    location /assets/ {
        access_log off;
        expires 1y;
        add_header Cache-Control "public, max-age=31536000, immutable";
        try_files $uri =404;
    }
    # SPA fallback — every other path yields index.html so client-side
    # react-router can take over (see src/App.tsx / <Routes>).
    location / {
        try_files $uri $uri/ /index.html;
    }
    # Defensive: no sourcemap exposure in sandbox.
    location ~ \.map$ {
        deny all;
        return 404;
    }
}

View File

@@ -1,54 +0,0 @@
# Multi-stage build for the CurrenciCombo orchestrator.
#
# Context MUST be the orchestrator/ directory so the build does not
# need to traverse the whole repo. Build from repo root with:
#
#   docker build -t currencicombo/orchestrator:local -f orchestrator/Dockerfile orchestrator/
#
# or via docker-compose (see docker-compose.yml at repo root).

# ------- deps stage -------
# Installs the full (dev + prod) dependency tree once; the build stage
# reuses it so `tsc` does not trigger a second install.
FROM node:20-alpine AS deps
WORKDIR /app
COPY package.json package-lock.json ./
# `fsevents` is a darwin-only optional dep pulled in transitively via
# ganache + jest; npm 10's `ci` still validates the darwin-pinned
# entries on linux builders and fails with EBADPLATFORM. Use
# `npm install --omit=optional` to sidestep the strict check; we do
# not need reproducible nested optional resolutions for a runtime-only
# image (the tsc build only touches first-party deps).
# --ignore-scripts skips dependency lifecycle hooks — NOTE(review):
# assumes no dep needs a postinstall step at build time; confirm.
RUN npm install --omit=optional --no-audit --no-fund --ignore-scripts

# ------- build stage -------
FROM node:20-alpine AS build
WORKDIR /app
COPY package.json package-lock.json ./
# Reuse the deps stage's node_modules rather than reinstalling.
COPY --from=deps /app/node_modules ./node_modules
COPY tsconfig.json ./
COPY src ./src
RUN npm run build

# ------- runtime stage -------
FROM node:20-alpine AS runtime
WORKDIR /app
ENV NODE_ENV=production
ENV PORT=8080
# dumb-init runs as PID 1 to forward signals to the node process; the
# dedicated system user keeps the service from running as root.
RUN apk add --no-cache dumb-init \
  && addgroup -S orchestrator \
  && adduser -S -G orchestrator orchestrator
COPY package.json package-lock.json ./
# Production deps only — the compiled dist/ needs no dev toolchain.
RUN npm install --omit=dev --omit=optional --no-audit --no-fund --ignore-scripts \
  && npm cache clean --force
COPY --from=build /app/dist ./dist
USER orchestrator
EXPOSE 8080
# Container-level liveness probe against the app's own /health endpoint.
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
  CMD node -e "require('http').get('http://127.0.0.1:8080/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["node", "dist/index.js"]

View File

@@ -1,18 +1,13 @@
import { z, ZodTypeAny } from "zod";
import { z } from "zod";
/**
* Empty strings from `.env`-loaded variables (docker-compose with
* `NOTARY_REGISTRY_ADDRESS=` in .env.sandbox, Kubernetes `valueFrom`
* secrets that resolve to "", etc.) should validate identically to
* the variable being unset. Without this coercion, zod's
* `.regex(...).optional()` rejects `""` because the value IS provided.
*/
function emptyToUndefined<T extends ZodTypeAny>(schema: T) {
return z.preprocess(
(v) => (typeof v === "string" && v.length === 0 ? undefined : v),
schema,
);
}
const emptyToUndefined = (value: unknown) => {
if (typeof value !== "string") return value;
const trimmed = value.trim();
return trimmed === "" ? undefined : trimmed;
};
const optionalString = () => z.preprocess(emptyToUndefined, z.string().optional());
const optionalUrl = () => z.preprocess(emptyToUndefined, z.string().url().optional());
/**
* Environment variable validation schema
@@ -20,24 +15,26 @@ function emptyToUndefined<T extends ZodTypeAny>(schema: T) {
const envSchema = z.object({
NODE_ENV: z.enum(["development", "production", "test"]).default("development"),
PORT: z.string().transform(Number).pipe(z.number().int().positive()),
DATABASE_URL: emptyToUndefined(z.string().url().optional()),
API_KEYS: emptyToUndefined(z.string().optional()),
REDIS_URL: emptyToUndefined(z.string().url().optional()),
DATABASE_URL: optionalUrl(),
API_KEYS: optionalString(),
REDIS_URL: optionalUrl(),
LOG_LEVEL: z.enum(["error", "warn", "info", "debug"]).default("info"),
ALLOWED_IPS: emptyToUndefined(z.string().optional()),
ALLOWED_IPS: optionalString(),
SESSION_SECRET: z.string().min(32),
JWT_SECRET: emptyToUndefined(z.string().min(32).optional()),
AZURE_KEY_VAULT_URL: emptyToUndefined(z.string().url().optional()),
AWS_SECRETS_MANAGER_REGION: emptyToUndefined(z.string().optional()),
SENTRY_DSN: emptyToUndefined(z.string().url().optional()),
JWT_SECRET: z.preprocess(emptyToUndefined, z.string().min(32).optional()),
AZURE_KEY_VAULT_URL: optionalUrl(),
AWS_SECRETS_MANAGER_REGION: optionalString(),
SENTRY_DSN: optionalUrl(),
// Chain-138 + NotaryRegistry wiring (arch §4.5). All optional; when
// absent the notary adapter falls back to its deterministic mock.
CHAIN_138_RPC_URL: emptyToUndefined(z.string().url().optional()),
CHAIN_138_CHAIN_ID: emptyToUndefined(z.string().regex(/^\d+$/).optional()),
NOTARY_REGISTRY_ADDRESS: emptyToUndefined(
CHAIN_138_RPC_URL: optionalUrl(),
CHAIN_138_CHAIN_ID: z.preprocess(emptyToUndefined, z.string().regex(/^\d+$/).optional()),
NOTARY_REGISTRY_ADDRESS: z.preprocess(
emptyToUndefined,
z.string().regex(/^0x[0-9a-fA-F]{40}$/).optional(),
),
ORCHESTRATOR_PRIVATE_KEY: emptyToUndefined(
ORCHESTRATOR_PRIVATE_KEY: z.preprocess(
emptyToUndefined,
z.string().regex(/^0x[0-9a-fA-F]{64}$/).optional(),
),
});
@@ -49,7 +46,7 @@ export const env = envSchema.parse({
NODE_ENV: process.env.NODE_ENV,
PORT: process.env.PORT || "8080",
DATABASE_URL: process.env.DATABASE_URL,
API_KEYS: process.env.API_KEYS,
API_KEYS: process.env.API_KEYS || process.env.ORCHESTRATOR_API_KEYS,
REDIS_URL: process.env.REDIS_URL,
LOG_LEVEL: process.env.LOG_LEVEL,
ALLOWED_IPS: process.env.ALLOWED_IPS,
@@ -74,7 +71,7 @@ export function validateEnv() {
NODE_ENV: process.env.NODE_ENV || "development",
PORT: process.env.PORT || "8080",
DATABASE_URL: process.env.DATABASE_URL,
API_KEYS: process.env.API_KEYS,
API_KEYS: process.env.API_KEYS || process.env.ORCHESTRATOR_API_KEYS,
REDIS_URL: process.env.REDIS_URL,
LOG_LEVEL: process.env.LOG_LEVEL || "info",
ALLOWED_IPS: process.env.ALLOWED_IPS,
@@ -83,6 +80,10 @@ export function validateEnv() {
AZURE_KEY_VAULT_URL: process.env.AZURE_KEY_VAULT_URL,
AWS_SECRETS_MANAGER_REGION: process.env.AWS_SECRETS_MANAGER_REGION,
SENTRY_DSN: process.env.SENTRY_DSN,
CHAIN_138_RPC_URL: process.env.CHAIN_138_RPC_URL,
CHAIN_138_CHAIN_ID: process.env.CHAIN_138_CHAIN_ID,
NOTARY_REGISTRY_ADDRESS: process.env.NOTARY_REGISTRY_ADDRESS,
ORCHESTRATOR_PRIVATE_KEY: process.env.ORCHESTRATOR_PRIVATE_KEY,
};
envSchema.parse(envWithDefaults);
console.log("✅ Environment variables validated");
@@ -97,4 +98,3 @@ export function validateEnv() {
throw error;
}
}

View File

@@ -70,16 +70,28 @@ app.get("/health", async (req, res) => {
const health = await healthCheck();
res.status(health.status === "healthy" ? 200 : 503).json(health);
});
app.get("/api/health", async (req, res) => {
const health = await healthCheck();
res.status(health.status === "healthy" ? 200 : 503).json(health);
});
app.get("/ready", async (req, res) => {
const ready = await readinessCheck();
res.status(ready ? 200 : 503).json({ ready });
});
app.get("/api/ready", async (req, res) => {
const ready = await readinessCheck();
res.status(ready ? 200 : 503).json({ ready });
});
app.get("/live", async (req, res) => {
const alive = await livenessCheck();
res.status(alive ? 200 : 503).json({ alive });
});
app.get("/api/live", async (req, res) => {
const alive = await livenessCheck();
res.status(alive ? 200 : 503).json({ alive });
});
// Metrics endpoint
app.get("/metrics", async (req, res) => {
@@ -87,6 +99,11 @@ app.get("/metrics", async (req, res) => {
const metrics = await getMetrics();
res.send(metrics);
});
app.get("/api/metrics", async (req, res) => {
res.setHeader("Content-Type", register.contentType);
const metrics = await getMetrics();
res.send(metrics);
});
// API routes with rate limiting
app.use("/api", apiLimiter);
@@ -173,4 +190,3 @@ async function start() {
}
start();

View File

@@ -0,0 +1,80 @@
# CurrenciCombo orchestrator production env (Phoenix CT 8604 / any systemd host)
#
# Installed by scripts/deployment/install.sh to:
# /etc/currencicombo/orchestrator.env
#
# Loaded by the currencicombo-orchestrator.service systemd unit via
# EnvironmentFile=. Values that are committed here are safe defaults;
# secrets are left blank and must be set before first boot.
#
# The portal is a statically built SPA (nginx), so it takes NO runtime env.
# Any VITE_* vars needed at build time are baked into dist/ by
# scripts/deployment/deploy-currencicombo-8604.sh before the rsync.
############################################################
# Server
############################################################
NODE_ENV=production
PORT=8080
# Bind to loopback only when behind NPMplus on the same host; bind
# 0.0.0.0 if NPMplus is on a different host (the CT 8604 case, so 0.0.0.0).
HOST=0.0.0.0
############################################################
# Postgres (local to the CT per install.sh)
############################################################
DATABASE_URL=postgresql://currencicombo:replace-me-on-install@127.0.0.1:5432/currencicombo
############################################################
# Redis (local to the CT per install.sh)
############################################################
REDIS_URL=redis://127.0.0.1:6379
############################################################
# Event bus signing (REQUIRED). install.sh generates this on first run
# via `openssl rand -hex 32` unless the file already exists.
############################################################
EVENT_SIGNING_SECRET=
############################################################
# API keys per role (REQUIRED). install.sh generates three random
# initiator/settler/auditor keys on first run unless set.
# Format: key1:role1,key2:role2,...
############################################################
API_KEYS=
############################################################
# Chain 138 — resolves EXT-CHAIN138-CI-RPC (already resolved).
############################################################
CHAIN_138_RPC_URL=https://rpc.public-0138.defi-oracle.io
CHAIN_138_CHAIN_ID=138
# Leave empty to run mock notary. Populate after running
# `contracts/scripts/deploy-notary-registry.ts` once.
NOTARY_REGISTRY_ADDRESS=
# Leave empty to run mock notary. Otherwise 0x-prefixed 32-byte hex.
ORCHESTRATOR_PRIVATE_KEY=
############################################################
# External dependency blockers (leave blank → mock fallback + EXT-* log)
# These are the exact IDs that the Proxmox
# scripts/verify/check-external-dependencies.sh gate knows about.
############################################################
# EXT-DBIS-CORE — set when dbis_core is deployed and reachable.
DBIS_CORE_URL=
# EXT-FIN-GATEWAY — set when a real Alliance Access / FIN gateway is
# provisioned. Leave blank to use PR R's in-process sandbox.
FIN_SANDBOX_URL=
# EXT-CC-* — the following four blockers are upstream-scaffold repos
# (cc-payment-adapters, cc-audit-ledger, cc-shared-events,
# cc-shared-schemas). They cannot be resolved from this repo; no
# env var flips them. The orchestrator logs EXT-CC-* as active on boot.
# Identity + controls matrix (not blocker IDs per se — they ship
# today via the cc-identity-core and cc-compliance-controls adapters
# merged in PR V/W). Blank keeps the embedded v0 matrix + mock identity.
CC_IDENTITY_URL=
CC_CONTROLS_MATRIX_URL=

View File

@@ -0,0 +1,254 @@
# CurrenciCombo — Phoenix / systemd deployment
This directory holds everything needed to deploy CurrenciCombo onto a
systemd host — starting with Phoenix CT 8604 on `r630-01`, but any
Debian/Ubuntu (or Alpine) host with Postgres + Redis available works.
The files here are **target-agnostic**. They hardcode no IPs, hostnames,
or VLANs. Environment-specific values — `curucombo.曼李.com`, the
`10.160.0.14` VIP, the NPMplus reverse proxy — are applied at the
edge (NPMplus) and at `/etc/currencicombo/orchestrator.env`, never in
the repo.
## Architecture on CT 8604
```
┌────────────────────┐
curucombo.曼李.com ──▶ NPMplus │192.168.11.167 │
(Cloudflare-proxied) │ TLS terminates here│
└─────────┬──────────┘
┌──────────────────────┴──────────────────────┐
│ │
▼ ▼
curucombo.曼李.com/* (default) curucombo.曼李.com/api/*
(incl. SSE /api/plans/*/events/stream)
│ │
CT 8604 │10.160.0.14:3000 CT 8604 │10.160.0.14:8080
▼ ▼
┌─────────────────────────────┐ ┌─────────────────────────────┐
│ currencicombo-webapp.service │ │ currencicombo-orchestrator │
│ nginx → /opt/currencicombo/ │ │ .service (systemd) │
│ webapp/dist/ │ │ node dist/index.js │
└─────────────────────────────┘ │ env /etc/currencicombo/ │
│ orchestrator.env │
└──────────────┬──────────────┘
postgresql + redis (same CT, local)
```
## Files
| path | purpose |
|---|---|
| `systemd/currencicombo-orchestrator.service` | Node orchestrator, reads `/etc/currencicombo/orchestrator.env` |
| `systemd/currencicombo-webapp.service` | nginx serving the Vite SPA on `:3000` |
| `webapp-nginx.conf` | full nginx.conf for the webapp unit |
| `.env.prod.example` | env template installed to `/etc/currencicombo/orchestrator.env` |
| `install.sh` | one-shot host setup: user / dirs / DB role / systemd units / first-run key handoff file |
| `install-prune-cron.sh` | opt-in daily cron that prunes `/var/lib/currencicombo/backups/` (30-day retention, keep-min 5) |
| `deploy-currencicombo-8604.sh` | build-and-swap deploy driver (the script Phoenix/proxmox deploy-api calls) |
| `README.md` | you're reading it |
## First-time setup on CT 8604
All commands run as **root** inside the CT.
1. Ensure Postgres + Redis are installed and running:
```
apt-get install -y postgresql redis-server
systemctl enable --now postgresql redis-server
```
2. Clone the repo into its staging location (once):
```
install -d -o root -g root /var/lib/currencicombo
git clone https://gitea.d-bis.org/d-bis/CurrenciCombo.git /var/lib/currencicombo/repo
```
3. Run `install.sh` (creates user, DB, systemd units, env file):
```
bash /var/lib/currencicombo/repo/scripts/deployment/install.sh
```
On success you'll see:
```
[install] generated EVENT_SIGNING_SECRET (64 hex)
[install] generated 3 API keys (initiator/settler/auditor)
[install] initial secrets written to /root/currencicombo-first-keys.txt (0600) — record in password manager, then 'shred -u /root/currencicombo-first-keys.txt'
[install] install complete.
```
`install.sh` writes the three API keys + `EVENT_SIGNING_SECRET` to **two** places:
- `/etc/currencicombo/orchestrator.env` — canonical, read by systemd (`0640`, owned by `currencicombo`).
- `/root/currencicombo-first-keys.txt` — **root-only handoff file** (`0600`). Grab it once, record the values in your password manager, then `shred -u` it.
The handoff file is **not** regenerated on re-run — if `orchestrator.env` already exists, `install.sh` does not produce new secrets.
4. (Optional) Install the backup-pruning cron:
```
bash /var/lib/currencicombo/repo/scripts/deployment/install-prune-cron.sh
```
Drops a `/etc/cron.daily/currencicombo-prune-backups` that deletes anything under `/var/lib/currencicombo/backups/` older than 30 days while **always keeping the newest 5** regardless of age. Safe on re-run; opt out with `sudo rm /etc/cron.daily/currencicombo-prune-backups`.
5. If you need to resolve any `EXT-*` blocker (e.g. point at a real dbis_core), edit `/etc/currencicombo/orchestrator.env` before the first deploy.
6. First build-and-start:
```
bash /var/lib/currencicombo/repo/scripts/deployment/deploy-currencicombo-8604.sh
```
Expected tail:
```
[deploy] orchestrator ready: {"ready":true}
[deploy] portal OK (HTTP 200)
[deploy] EXT-* blocker summary from orchestrator boot log:
[ExternalBlockers] 6 active, 1 resolved
id: EXT-DBIS-CORE
id: EXT-CC-PAYMENT-ADAPTERS
...
id: EXT-CHAIN138-CI-RPC (resolved)
[deploy] deploy complete. ref=main sha=<short> ts=<timestamp>
```
## NPMplus ingress changes required at cutover
`curucombo.曼李.com` today proxies 100% to `10.160.0.14:3000`. After
cutover it must become a **single-origin path-routed proxy** with **two**
rules (the SSE endpoint lives at `/api/plans/:id/events/stream`, so it's
already under `/api/*` — no separate `/events/*` rule is needed):
| location | upstream | proxy settings |
|---|---|---|
| `/api/*` | `http://10.160.0.14:8080` | **SSE-friendly settings apply here because the SSE route `/api/plans/:id/events/stream` is under /api/**. Use `proxy_pass http://10.160.0.14:8080;` with **no trailing slash** so `/api/...` reaches the orchestrator unchanged. Set: `proxy_http_version 1.1;`, `proxy_set_header Connection "";`, `proxy_buffering off;`, `proxy_cache off;`, `proxy_read_timeout 24h;`, `proxy_send_timeout 24h;`. Standard forwarding: `proxy_set_header Host $host;`, `X-Real-IP $remote_addr;`, `X-Forwarded-For $proxy_add_x_forwarded_for;`, `X-Forwarded-Proto $scheme;`. The slight overhead of `proxy_buffering off` on plain REST calls is negligible for this workload. |
| `/` | `http://10.160.0.14:3000` | Vite SPA. Default upstream. No special settings. |
If you skip the `/api/*` rule, the nginx in `webapp-nginx.conf`
intentionally returns `HTTP 421` for that path — a clean "upstream is
misconfigured" signal instead of silently returning `index.html` and
breaking the browser with a JSON parse error.
## Subsequent deploys
Every deploy after the first is just:
```
sudo /var/lib/currencicombo/repo/scripts/deployment/deploy-currencicombo-8604.sh
```
Flags:
- `--ref=<branch-or-sha>` — deploy something other than `main`.
- `--dry-run` — print what would happen, don't touch anything.
- `--skip-migrate` — hotfix deploys that don't change the schema.
- `--skip-build` — reuse the build from the previous run (debugging only).
- `--rollback` — restore the most recent `/var/lib/currencicombo/backups/<ts>/` and restart units. Does **not** git-pull or rebuild.
Every deploy writes a timestamped backup to
`/var/lib/currencicombo/backups/<YYYYmmdd-HHMMSS>/` before swapping. Pruning is opt-in via `install-prune-cron.sh` (30-day retention, keep-min 5). Without the cron, backups accumulate forever — quietly filling `/var/lib` is how the next outage starts.
## Failure handling on deploy
**Rollback is manual.** `deploy-currencicombo-8604.sh` **does not** auto-restore the previous backup if the orchestrator fails to become ready. First cutovers typically fail because of env typos or migration mistakes, and auto-restoring hides the failure state ops needs.
Instead, on a readiness timeout the deploy script prints:
- last 40 lines of `journalctl -u currencicombo-orchestrator`
- last 20 lines of `journalctl -u currencicombo-webapp`
- **the exact `--rollback` command with the specific backup path filled in**
Example tail on failure:
```
================================================================
DEPLOY FAILED: orchestrator did not become ready after 60s
================================================================
## currencicombo-orchestrator (last 40 lines):
... env validation error: EVENT_SIGNING_SECRET is required ...
## Units are in whatever state deploy left them. To restore
## the previous build (does NOT revert DB migrations):
sudo /var/lib/currencicombo/repo/scripts/deployment/deploy-currencicombo-8604.sh --rollback
# (will restore /var/lib/currencicombo/backups/20260423-140215)
================================================================
```
Rollback one-liner (when ops has decided to restore):
```
sudo /var/lib/currencicombo/repo/scripts/deployment/deploy-currencicombo-8604.sh --rollback
```
Rollback restores the most recent backup and restarts both units. It **does not** touch the DB. If the failed deploy applied a new migration, DB rollback is a manual `psql` task — the orchestrator's migration runner only emits `up()` paths.
## Post-cutover smoke checks through NPMplus
Once the NPMplus `/api/*` rule is live, from a workstation (not the CT):
```
# 1. Front-door TLS is healthy
curl -skI https://curucombo.xn--vov0g.com/ | head -3
# expect: HTTP/2 200
# expect: NO 'x-nextjs-prerender' header (that was the old Next.js build)
# 2. SPA is the new Vite portal
curl -sk https://curucombo.xn--vov0g.com/ | grep -oE '<title>[^<]+</title>'
# expect: <title>Solace Bank Group PLC — Treasury Management Portal</title>
# 3. Orchestrator ready through NPMplus
curl -sk https://curucombo.xn--vov0g.com/api/ready | head -1
# expect: {"ready":true} (not HTML)
# 4. Orchestrator blocker log (through CT shell, not NPMplus)
ssh root@10.160.0.14 'journalctl -u currencicombo-orchestrator -n 200 | grep -E "ExternalBlockers|EXT-"'
# expect: [ExternalBlockers] 6 active, 1 resolved
# expect: one line per EXT-* id
# 5. SSE actually streams (catches silent NPMplus proxy_buffering=on misconfig)
curl -sk -N --max-time 5 -H 'Accept: text/event-stream' \
https://curucombo.xn--vov0g.com/api/plans/demo-pay-014/events/stream \
| head -20 || true
# expect: HTTP/2 200 with Content-Type: text/event-stream
# expect: at least one 'data: {...}\n\n' frame to arrive WITHIN ~1s
# if you see nothing for 3-5s and then everything dumps at once:
# NPMplus has proxy_buffering=on. Fix: proxy_buffering off; proxy_http_version 1.1; proxy_set_header Connection "";
# if the ping is 401/403: expected — SSE is auth-gated; the point is to
# prove the request REACHED the orchestrator (content-type header +
# chunked response headers) rather than hitting the Vite SPA.
```
A plain `HTTP/2 200` with a `Content-Type: text/html` body on `/api/ready` means NPMplus is silently falling back to the `/` rule — the `/api/*` rule is missing or ordered wrong. The `webapp-nginx.conf` in this repo returns `HTTP 421` for `/api/*` to make that case obvious when debugging CT-locally, but at the NPMplus edge nginx serves whatever NPMplus routes to it.
## Troubleshooting
| symptom | cause / check |
|---|---|
| `/api/*` returns `421 NPMplus is misconfigured` | NPMplus `/api/*` rule missing or wrong upstream. |
| SSE stream (`/api/plans/:id/events/stream`) connects then disconnects after ~60s | NPMplus forgot `proxy_buffering off` + high `proxy_read_timeout`. |
| orchestrator unit enters `activating (auto-restart)` loop | `journalctl -u currencicombo-orchestrator -n 80` — usually a zod env-validation error. The boot-time assertion message names the missing/invalid var. |
| orchestrator boot log says `[ExternalBlockers] N active` where N > 6 | you added an `EXT-*` env var without also updating the central registry in `orchestrator/src/config/externalBlockers.ts`. |
| `/health` returns 503 but `/ready` is 200 | memory `critical` is a separate signal from readiness. Inspect CT memory; this happens on constrained builders and is not a deploy bug. |
| portal page loads but MetaMask login does nothing | the portal couldn't reach `/api/auth/*`. Walk back up the NPMplus rule chain. |
## Cutting over from the pre-existing Next.js build
Phoenix previously had an older Next.js "ISO-20022 Combo Flow" app in
`/opt/currencicombo/webapp`. The cutover sequence on CT 8604 is:
1. **Backup the old install** out-of-band:
```
tar czf /root/currencicombo-preRepo-$(date +%s).tgz /opt/currencicombo /etc/currencicombo 2>/dev/null || true
```
2. **Disable the pre-existing systemd units** (they're the same names but point at the old tree):
```
systemctl stop currencicombo-webapp currencicombo-orchestrator
systemctl disable currencicombo-webapp currencicombo-orchestrator
```
3. Run `install.sh` (writes the new units, new nginx, new env). On an already-set-up host this is idempotent: it preserves `/etc/currencicombo/orchestrator.env` if it already exists.
4. Run `deploy-currencicombo-8604.sh`.
5. Apply the NPMplus `/api` + `/` path rules.
6. Smoke from outside the CT: `curl -skI https://curucombo.xn--vov0g.com/ && curl -sk https://curucombo.xn--vov0g.com/api/ready`.
## Proxmox-side follow-up (not in this PR)
After this PR merges and the above cutover runs cleanly, the
`/home/intlc/projects/proxmox` repo needs a separate commit to:
- Update `phoenix-deploy-api/deploy-targets.json` to point at:
- repo: `d-bis/CurrenciCombo`
- branch: `main`
- target: `default`
- deploy entrypoint: `scripts/deployment/deploy-currencicombo-8604.sh`
- Remove any stale `/opt/currencicombo/webapp` Next.js references.
- Drop any description of `ignoreBuildErrors: true` in `webapp/next.config.ts` — the new webapp is Vite+tsc-strict, no build-error suppression.

View File

@@ -0,0 +1,236 @@
#!/usr/bin/env bash
# deploy-currencicombo-8604.sh — build-and-swap deploy for CurrenciCombo.
#
# Runs on a systemd host that has already had `install.sh` applied once.
# This is the script referenced by the Proxmox repo's
# `phoenix-deploy-api/deploy-targets.json` tuple
# (repo=d-bis/CurrenciCombo, branch=main, target=default).
#
# Steps (each idempotent, each can be --dry-run'd):
# 1. git clone/pull /var/lib/currencicombo/repo to the target ref.
# 2. Build orchestrator (npm ci + npm run build).
# 3. Build portal/webapp (npm ci + npm run build), baking
# VITE_ORCHESTRATOR_URL into the bundle.
# 4. Run DB migrations (npm run migrate in orchestrator/).
# 5. Stop systemd units.
# 6. rsync build output into /opt/currencicombo/{orchestrator,webapp}.
# 7. Start systemd units.
# 8. Smoke-test /ready + portal / + print EXT-* blocker summary.
#
# Rollback: `--rollback` restores the previous backup under
# /var/lib/currencicombo/backups/<timestamp>.
#
# CT 8604 is in the filename for ops-grep-ability; the script itself is
# host-agnostic. Override paths via env vars if you run it elsewhere.
set -euo pipefail
# ----- defaults (override via env) ------------------------------------
# Git source and on-host layout; paths match what install.sh creates
# (/var/lib/currencicombo/repo staging clone, /opt/currencicombo tree).
: "${CC_GIT_REMOTE:=https://gitea.d-bis.org/d-bis/CurrenciCombo.git}"
: "${CC_GIT_REF:=main}"
: "${CC_REPO_DIR:=/var/lib/currencicombo/repo}"
: "${CC_APP_HOME:=/opt/currencicombo}"
: "${CC_BACKUP_DIR:=/var/lib/currencicombo/backups}"
: "${CC_USER:=currencicombo}"
# Portal build-time env. The NPMplus ingress path-routes /api/* —
# including the SSE stream at /api/plans/:id/events/stream — to the
# orchestrator, so same-origin works.
: "${VITE_ORCHESTRATOR_URL:=https://curucombo.xn--vov0g.com}"
: "${ORCHESTRATOR_UNIT:=currencicombo-orchestrator.service}"
: "${WEBAPP_UNIT:=currencicombo-webapp.service}"
# CT-local smoke-test endpoints probed after the swap (step 8).
: "${CC_HEALTH_URL:=http://127.0.0.1:8080/ready}"
: "${CC_PORTAL_URL:=http://127.0.0.1:3000/}"
: "${CC_HEALTH_TIMEOUT_SECS:=60}"
# ----- flags ----------------------------------------------------------
# Boolean toggles set by the arg loop below; --ref=<git-ref> additionally
# overrides CC_GIT_REF.
DRY_RUN=0
SKIP_MIGRATE=0
SKIP_BUILD=0
DO_ROLLBACK=0

# usage — print CLI help to stdout.
# Fixed: the --skip-build description previously told operators to use
# "--dry-run --skip-build=no", a flag form the parser rejects as an
# unknown argument (only bare `--skip-build` is accepted).
usage() {
cat <<'USAGE'
Usage: sudo ./deploy-currencicombo-8604.sh [flags]
Flags:
  --ref=<git-ref>   Override CC_GIT_REF (default: main)
  --dry-run         Print commands, don't run them
  --skip-migrate    Skip `npm run migrate` step (use for hotfix
                    deploys where schema hasn't changed)
  --skip-build      Reuse the existing build output already present in
                    CC_REPO_DIR from the previous run (debugging only)
  --rollback        Restore the most recent backup and restart.
                    Does not run git/build/migrate.
  -h, --help        This help
Env overrides:
  CC_GIT_REMOTE, CC_GIT_REF, CC_REPO_DIR, CC_APP_HOME, CC_BACKUP_DIR,
  CC_USER, VITE_ORCHESTRATOR_URL, ORCHESTRATOR_UNIT, WEBAPP_UNIT,
  CC_HEALTH_URL, CC_PORTAL_URL, CC_HEALTH_TIMEOUT_SECS
USAGE
}
# Parse CLI flags. Everything is a boolean toggle except --ref=<git-ref>,
# which overrides the CC_GIT_REF env default.
while [[ $# -gt 0 ]]; do
case "$1" in
--ref=*) CC_GIT_REF="${1#*=}"; shift ;;
--dry-run) DRY_RUN=1; shift ;;
--skip-migrate) SKIP_MIGRATE=1; shift ;;
--skip-build) SKIP_BUILD=1; shift ;;
--rollback) DO_ROLLBACK=1; shift ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown arg: $1" >&2; usage; exit 2 ;;
esac
done
# Logging helpers — all diagnostics go to stderr so stdout stays clean.
log() { printf '[deploy] %s\n' "$*" >&2; }
warn() { printf '[deploy][WARN] %s\n' "$*" >&2; }
die() { printf '[deploy][FATAL] %s\n' "$*" >&2; exit 1; }
# run CMD — eval a single pre-quoted command string as root, or just
# print it under --dry-run. Callers are responsible for shell quoting.
run() { if [[ "${DRY_RUN}" -eq 1 ]]; then printf '[deploy][dry-run] %s\n' "$*" >&2; else eval "$*"; fi; }
# runcc CMD — like run, but executes CMD as the service user via
# `sudo -u $CC_USER bash -lc`, so builds/rsyncs never run as root.
runcc() { if [[ "${DRY_RUN}" -eq 1 ]]; then printf '[deploy][dry-run][as %s] %s\n' "${CC_USER}" "$*" >&2; else sudo -u "${CC_USER}" -H bash -lc "$*"; fi; }
[[ "$EUID" -eq 0 ]] || die "must run as root (sudo)"
# ----- rollback fast-path ---------------------------------------------
# --rollback restores the newest backup under CC_BACKUP_DIR and restarts
# both units. It does NOT git-pull, rebuild, migrate, or touch the DB.
if [[ "${DO_ROLLBACK}" -eq 1 ]]; then
# Newest-first listing (ls -t); head -1 picks the most recent backup.
LATEST="$(ls -1dt "${CC_BACKUP_DIR}"/* 2>/dev/null | head -1 || true)"
[[ -n "${LATEST}" ]] || die "no backup under ${CC_BACKUP_DIR}"
log "rolling back to ${LATEST}"
run "systemctl stop '${WEBAPP_UNIT}' '${ORCHESTRATOR_UNIT}'"
# --delete makes the restore exact: files the failed deploy added are
# removed rather than merged with the backup contents.
run "rsync -a --delete '${LATEST}/orchestrator/' '${CC_APP_HOME}/orchestrator/'"
run "rsync -a --delete '${LATEST}/webapp/' '${CC_APP_HOME}/webapp/'"
run "systemctl start '${ORCHESTRATOR_UNIT}' '${WEBAPP_UNIT}'"
log "rollback applied. systemctl status ${ORCHESTRATOR_UNIT} to verify."
exit 0
fi
# ----- 1. git ---------------------------------------------------------
# Ensure the staging clone exists and is owned by the service user, then
# hard-reset it to the requested ref. CC_GIT_REF=local skips all git
# operations and deploys whatever is already staged in CC_REPO_DIR.
run "install -d -o '${CC_USER}' -g '${CC_USER}' -m 0755 '${CC_REPO_DIR}'"
run "chown -R '${CC_USER}:${CC_USER}' '${CC_REPO_DIR}'"
if [[ ! -d "${CC_REPO_DIR}/.git" && "${CC_GIT_REF}" != "local" ]]; then
# Fixed: remote and destination were concatenated with no separator,
# producing an unreadable log line (the other log lines use "→").
log "cloning ${CC_GIT_REMOTE} → ${CC_REPO_DIR}"
runcc "git clone '${CC_GIT_REMOTE}' '${CC_REPO_DIR}'"
fi
if [[ -d "${CC_REPO_DIR}/.git" && "${CC_GIT_REF}" != "local" ]]; then
runcc "cd '${CC_REPO_DIR}' && git fetch --prune origin"
# reset --hard (not pull/merge) so a force-pushed ref still deploys.
runcc "cd '${CC_REPO_DIR}' && git reset --hard 'origin/${CC_GIT_REF}'"
REF_SHA="$(sudo -u "${CC_USER}" git -C "${CC_REPO_DIR}" rev-parse --short HEAD 2>/dev/null || echo unknown)"
log "repo at ${CC_GIT_REF} = ${REF_SHA}"
else
REF_SHA="local"
log "using staged local workspace from ${CC_REPO_DIR}"
fi
# ----- 2. orchestrator build -----------------------------------------
# Both builds run as the service user (runcc), never as root.
if [[ "${SKIP_BUILD}" -eq 0 ]]; then
log "building orchestrator"
# npm ci needs a lockfile; fall back to npm install when absent.
if [[ -f "${CC_REPO_DIR}/orchestrator/package-lock.json" ]]; then
runcc "cd '${CC_REPO_DIR}/orchestrator' && npm ci --no-audit --no-fund"
else
runcc "cd '${CC_REPO_DIR}/orchestrator' && npm install --no-audit --no-fund"
fi
runcc "cd '${CC_REPO_DIR}/orchestrator' && npm run build"
# VITE_ORCHESTRATOR_URL is baked into the SPA bundle at build time.
log "building portal (VITE_ORCHESTRATOR_URL=${VITE_ORCHESTRATOR_URL})"
# NOTE(review): the `|| npm ci --force` retry presumably works around
# optional-dependency install failures on some hosts — confirm intent.
runcc "cd '${CC_REPO_DIR}' && npm ci --include=optional --no-audit --no-fund || npm ci --include=optional --force --no-audit --no-fund"
runcc "cd '${CC_REPO_DIR}' && VITE_ORCHESTRATOR_URL='${VITE_ORCHESTRATOR_URL}' npm run build"
else
log "skipping builds (--skip-build)"
fi
# ----- 3. migrations --------------------------------------------------
# Migrations run BEFORE the units are stopped (step 5), so a failed
# migration aborts the deploy (set -e) while the old build keeps serving.
if [[ "${SKIP_MIGRATE}" -eq 0 ]]; then
log "running DB migrations"
runcc "cd '${CC_REPO_DIR}/orchestrator' && npm run migrate"
else
log "skipping migrations (--skip-migrate)"
fi
# ----- 4. backup previous install ------------------------------------
TS="$(date +%Y%m%d-%H%M%S)"
BACKUP="${CC_BACKUP_DIR}/${TS}"
# Only back up when a previous build exists — on a first deploy there is
# nothing to save; the failure banner in step 8 handles the missing dir.
if [[ -d "${CC_APP_HOME}/orchestrator/dist" || -d "${CC_APP_HOME}/webapp/dist" ]]; then
log "backing up current install → ${BACKUP}"
run "install -d -o root -g root -m 0700 '${BACKUP}/orchestrator' '${BACKUP}/webapp'"
run "rsync -a '${CC_APP_HOME}/orchestrator/' '${BACKUP}/orchestrator/'"
run "rsync -a '${CC_APP_HOME}/webapp/' '${BACKUP}/webapp/'"
fi
# ----- 5. stop units --------------------------------------------------
# `|| true`: stopping an already-stopped or not-yet-installed unit must
# not abort the deploy under set -e.
log "stopping systemd units"
run "systemctl stop '${WEBAPP_UNIT}' || true"
run "systemctl stop '${ORCHESTRATOR_UNIT}' || true"
# ----- 6. swap in new build ------------------------------------------
log "rsyncing new build into ${CC_APP_HOME}"
# Orchestrator: dist/ + node_modules/ + package.json + package-lock.json
runcc "rsync -a --delete '${CC_REPO_DIR}/orchestrator/dist/' '${CC_APP_HOME}/orchestrator/dist/'"
# NOTE(review): node_modules is synced WITHOUT --delete, so modules
# removed upstream linger in the target tree — confirm this is intended.
runcc "rsync -a '${CC_REPO_DIR}/orchestrator/node_modules/' '${CC_APP_HOME}/orchestrator/node_modules/'"
runcc "cp '${CC_REPO_DIR}/orchestrator/package.json' '${CC_APP_HOME}/orchestrator/package.json'"
runcc "if [[ -f '${CC_REPO_DIR}/orchestrator/package-lock.json' ]]; then cp '${CC_REPO_DIR}/orchestrator/package-lock.json' '${CC_APP_HOME}/orchestrator/package-lock.json'; else rm -f '${CC_APP_HOME}/orchestrator/package-lock.json'; fi"
# Webapp: dist/
runcc "rsync -a --delete '${CC_REPO_DIR}/dist/' '${CC_APP_HOME}/webapp/dist/'"
# ----- 7. start units ------------------------------------------------
log "starting systemd units"
run "systemctl start '${ORCHESTRATOR_UNIT}'"
run "systemctl start '${WEBAPP_UNIT}'"
# ----- 8. smoke -------------------------------------------------------
if [[ "${DRY_RUN}" -eq 1 ]]; then
log "dry-run: skipping smoke test"
exit 0
fi
log "waiting up to ${CC_HEALTH_TIMEOUT_SECS}s for orchestrator ${CC_HEALTH_URL}"
# Poll the readiness endpoint roughly every 2s until success or timeout.
SECS=0
until curl -sfL --max-time 3 "${CC_HEALTH_URL}" >/dev/null 2>&1; do
SECS=$((SECS + 2))
if [[ "${SECS}" -ge "${CC_HEALTH_TIMEOUT_SECS}" ]]; then
# Loud failure summary. Deliberately does NOT auto-rollback — first
# cutovers often fail because of env/migration mistakes, and
# auto-restoring the old build hides the failure state ops needs to
# diagnose. Print the exact --rollback command with the specific
# backup path filled in, so it's one copy-paste away if desired.
{
echo
echo "================================================================"
echo "DEPLOY FAILED: orchestrator did not become ready after ${CC_HEALTH_TIMEOUT_SECS}s"
echo "================================================================"
echo
echo "## currencicombo-orchestrator (last 40 lines):"
journalctl -u "${ORCHESTRATOR_UNIT}" -n 40 --no-pager 2>&1 || echo "(journalctl unavailable)"
echo
echo "## currencicombo-webapp (last 20 lines):"
journalctl -u "${WEBAPP_UNIT}" -n 20 --no-pager 2>&1 || echo "(journalctl unavailable)"
echo
echo "## Units are in whatever state deploy left them. To restore"
echo "## the previous build (does NOT revert DB migrations):"
echo
# The ${BACKUP} dir only exists if step 4 actually took a backup.
if [[ -n "${BACKUP:-}" && -d "${BACKUP}" ]]; then
echo "  sudo $0 --rollback"
echo "  # (will restore ${BACKUP})"
else
echo "  # No backup was taken (first deploy). Manual recovery required."
fi
echo
echo "================================================================"
} >&2
exit 1
fi
sleep 2
done
log "orchestrator ready: $(curl -sf "${CC_HEALTH_URL}")"
log "probing portal ${CC_PORTAL_URL}"
# Any 2xx status from the webapp nginx counts as healthy.
PORTAL_CODE="$(curl -s -o /dev/null -w '%{http_code}' "${CC_PORTAL_URL}" || echo ERR)"
[[ "${PORTAL_CODE}" =~ ^2 ]] || die "portal returned HTTP ${PORTAL_CODE}"
log "portal OK (HTTP ${PORTAL_CODE})"
log "EXT-* blocker summary from orchestrator boot log:"
journalctl -u "${ORCHESTRATOR_UNIT}" --no-pager -n 200 \
| grep -E 'ExternalBlockers|EXT-[A-Z0-9-]+' | tail -20 || true
log "deploy complete. ref=${CC_GIT_REF} sha=${REF_SHA} ts=${TS}"

View File

@@ -0,0 +1,102 @@
#!/usr/bin/env bash
# install-prune-cron.sh — opt-in cron job to prune old deploy backups.
#
# Run ONCE as root (or with sudo) after install.sh to enable daily
# pruning of /var/lib/currencicombo/backups/. The pruner:
# - deletes entries older than 30 days
# - ALWAYS keeps the newest N backups regardless of age (default 5)
#
# No-op on re-run. Opt out by removing /etc/cron.daily/currencicombo-prune-backups.
set -euo pipefail
# Install-time knobs. Their values are baked into the generated cron
# script below; changing them requires re-running this installer.
BACKUP_DIR="${CC_BACKUP_DIR:-/var/lib/currencicombo/backups}"
RETAIN_DAYS="${CC_BACKUP_RETAIN_DAYS:-30}"
KEEP_MIN="${CC_BACKUP_KEEP_MIN:-5}"
CRON_FILE="/etc/cron.daily/currencicombo-prune-backups"
DRY_RUN=0
# Parse CLI flags (--dry-run is the only toggle).
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run) DRY_RUN=1; shift ;;
-h|--help)
cat <<'USAGE'
Usage: sudo ./install-prune-cron.sh [--dry-run]
Env overrides:
CC_BACKUP_DIR (default: /var/lib/currencicombo/backups)
CC_BACKUP_RETAIN_DAYS (default: 30)
CC_BACKUP_KEEP_MIN (default: 5)
USAGE
exit 0 ;;
*) echo "unknown arg: $1" >&2; exit 2 ;;
esac
done
# Logging helpers — stderr only.
log() { printf '[install-prune-cron] %s\n' "$*" >&2; }
die() { printf '[install-prune-cron][FATAL] %s\n' "$*" >&2; exit 1; }
[[ "$EUID" -eq 0 ]] || die "must run as root (sudo)"
# The pruner script body. Runs daily via cron.daily.
# The heredoc delimiter is intentionally UNQUOTED: ${BACKUP_DIR},
# ${RETAIN_DAYS} and ${KEEP_MIN} are expanded NOW (install time), while
# every \$-escaped reference survives as a runtime variable of the
# generated script.
# KEEP_MIN is enforced by listing backups newest-first, skipping the
# first KEEP_MIN, then deleting any remaining entries older than
# RETAIN_DAYS. This means we always keep at least KEEP_MIN (even if
# they're all <30 days old), and never delete one of the newest
# KEEP_MIN (even if it's >30 days old on a dormant host).
# `|| true`: read -d '' returns non-zero at EOF even on a full read,
# which would otherwise trip set -e.
read -r -d '' PRUNER_BODY <<PRUNER || true
#!/usr/bin/env bash
# Managed by scripts/deployment/install-prune-cron.sh. Edits overwritten
# on next install. Opt out by deleting this file.
set -euo pipefail
BACKUP_DIR="${BACKUP_DIR}"
RETAIN_DAYS=${RETAIN_DAYS}
KEEP_MIN=${KEEP_MIN}
[[ -d "\$BACKUP_DIR" ]] || exit 0
cd "\$BACKUP_DIR"
mapfile -t all < <(find . -mindepth 1 -maxdepth 1 -type d -printf '%T@ %p\n' 2>/dev/null | sort -rn | awk '{print \$2}')
count=\${#all[@]}
if (( count <= KEEP_MIN )); then
logger -t currencicombo-prune "count=\$count <= KEEP_MIN=\$KEEP_MIN; nothing to prune"
exit 0
fi
cutoff=\$(date -d "\$RETAIN_DAYS days ago" +%s)
deleted=0
kept=0
for i in "\${!all[@]}"; do
p="\${all[\$i]}"
if (( i < KEEP_MIN )); then
kept=\$((kept + 1))
continue
fi
mtime=\$(stat -c %Y "\$p" 2>/dev/null || echo 0)
if (( mtime < cutoff )); then
rm -rf -- "\$p"
deleted=\$((deleted + 1))
else
kept=\$((kept + 1))
fi
done
logger -t currencicombo-prune "deleted=\$deleted kept=\$kept total_before=\$count"
PRUNER
# Under --dry-run, print the generated pruner instead of installing it.
if [[ "${DRY_RUN}" -eq 1 ]]; then
log "[dry-run] would write ${CRON_FILE} (0755) with pruner targeting ${BACKUP_DIR}, retain ${RETAIN_DAYS}d, keep-min ${KEEP_MIN}"
echo "---"
echo "${PRUNER_BODY}"
echo "---"
exit 0
fi
# Write the cron script and make it executable for cron.daily.
printf '%s\n' "${PRUNER_BODY}" > "${CRON_FILE}"
chmod 0755 "${CRON_FILE}"
chown root:root "${CRON_FILE}"
log "installed ${CRON_FILE} (backups older than ${RETAIN_DAYS}d, keep-min ${KEEP_MIN}, target ${BACKUP_DIR})"
log "runs daily via /etc/cron.daily/. Opt out: sudo rm ${CRON_FILE}"
log "logs to syslog (tag currencicombo-prune); journalctl -t currencicombo-prune"

252
scripts/deployment/install.sh Executable file
View File

@@ -0,0 +1,252 @@
#!/usr/bin/env bash
# install.sh — idempotent first-time setup for CurrenciCombo on a systemd host.
#
# Intended to run ONCE per host as root (or with sudo). Running it again is
# safe: it will skip already-present artifacts and warn on conflicts.
#
# What this does:
# 1. Creates the `currencicombo` system user and /opt/currencicombo tree.
# 2. Installs nginx (Debian/Ubuntu or Alpine) if not present.
# 3. Ensures a local Postgres is running and creates a fresh
# `currencicombo` role + DB (refuses to touch an existing one unless
# --force-recreate is passed).
# 4. Ensures a local Redis is running.
# 5. Writes /etc/currencicombo/orchestrator.env from .env.prod.example,
# auto-populating EVENT_SIGNING_SECRET and ORCHESTRATOR_API_KEYS with
# fresh randoms the first time.
# 6. Installs /etc/currencicombo/webapp-nginx.conf.
# 7. Installs the two systemd units and runs `systemctl daemon-reload`.
# 8. Enables (does NOT start) both units. First start happens via
# scripts/deployment/deploy-currencicombo-8604.sh after the first
# successful build.
#
# This script is target-agnostic. It has no hardcoded IP / hostname /
# VLAN. The NPMplus ingress in front of it is configured separately —
# see scripts/deployment/README.md.
set -euo pipefail
# Directory this script lives in (scripts/deployment/), used to locate
# sibling files such as .env.prod.example.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Fixed install layout.
APP_USER="currencicombo"
APP_HOME="/opt/currencicombo"
ETC_DIR="/etc/currencicombo"
LOG_DIR="/var/log/currencicombo"
REPO_DIR="/var/lib/currencicombo/repo"
ENV_FILE="${ETC_DIR}/orchestrator.env"
NGINX_FILE="${ETC_DIR}/webapp-nginx.conf"
SYSTEMD_DIR="/etc/systemd/system"
# Flag defaults (set by the arg loop below).
FORCE_RECREATE_DB=0
DRY_RUN=0
SKIP_NGINX_INSTALL=0
# Logging helpers — stderr only.
log() { printf '[install] %s\n' "$*" >&2; }
warn() { printf '[install][WARN] %s\n' "$*" >&2; }
die() { printf '[install][FATAL] %s\n' "$*" >&2; exit 1; }
# run CMD — eval a single pre-quoted command string, or print it under
# --dry-run. Callers are responsible for shell quoting.
run() { if [[ "${DRY_RUN}" -eq 1 ]]; then printf '[install][dry-run] %s\n' "$*" >&2; else eval "$*"; fi; }
# sql_escape STR — double every single quote in STR so it can be safely
# embedded inside a single-quoted SQL string literal. Result on stdout,
# with no trailing newline.
sql_escape() {
  local escaped=${1//\'/\'\'}
  printf '%s' "${escaped}"
}
# usage — print CLI help to stdout (shown on -h/--help).
usage() {
cat <<'USAGE'
Usage: sudo ./install.sh [--force-recreate-db] [--skip-nginx-install] [--dry-run]
--force-recreate-db DROP and recreate the currencicombo Postgres role
and DB even if they already exist. DESTRUCTIVE.
--skip-nginx-install Do not apt/apk install nginx (use if you already
have a custom nginx build in place).
--dry-run Print the commands that would run, don't run them.
USAGE
}
# Parse CLI flags (boolean toggles only; unknown args are fatal).
while [[ $# -gt 0 ]]; do
case "$1" in
--force-recreate-db) FORCE_RECREATE_DB=1; shift ;;
--skip-nginx-install) SKIP_NGINX_INSTALL=1; shift ;;
--dry-run) DRY_RUN=1; shift ;;
-h|--help) usage; exit 0 ;;
*) die "unknown arg: $1" ;;
esac
done
[[ "$EUID" -eq 0 ]] || die "must run as root (sudo)"
# ----------------------------------------------------------------------
# 1. User + tree
# ----------------------------------------------------------------------
# System account with no login shell; owns the install/config/log trees.
if id "${APP_USER}" >/dev/null 2>&1; then
log "user ${APP_USER} already exists"
else
log "creating system user ${APP_USER}"
run useradd --system --home-dir "${APP_HOME}" --shell /usr/sbin/nologin --user-group "${APP_USER}"
fi
# install -d is idempotent: creates missing dirs, leaves existing ones.
for d in "${APP_HOME}" "${APP_HOME}/orchestrator" "${APP_HOME}/webapp" \
"${APP_HOME}/webapp/dist" "${ETC_DIR}" "${LOG_DIR}" "${REPO_DIR}"; do
run install -d -o "${APP_USER}" -g "${APP_USER}" -m 0755 "$d"
done
run chown "${APP_USER}:${APP_USER}" "${APP_HOME}" "${LOG_DIR}" "${REPO_DIR}"
# /etc/currencicombo holds secrets (orchestrator.env) — restrict access.
run chmod 0750 "${ETC_DIR}"
# ----------------------------------------------------------------------
# 2. nginx (required by currencicombo-webapp.service)
# ----------------------------------------------------------------------
# Supports Debian/Ubuntu (apt) and Alpine (apk); anything else must
# provide nginx manually and pass --skip-nginx-install.
if [[ "${SKIP_NGINX_INSTALL}" -eq 0 ]]; then
if command -v nginx >/dev/null 2>&1; then
log "nginx already installed ($(nginx -v 2>&1 | head -1))"
elif command -v apt-get >/dev/null 2>&1; then
log "installing nginx via apt"
run 'DEBIAN_FRONTEND=noninteractive apt-get update -q'
run 'DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends nginx-light'
# We use our own nginx.conf via -c, so disable the distro site.
run systemctl disable --now nginx 2>/dev/null || true
elif command -v apk >/dev/null 2>&1; then
log "installing nginx via apk"
run apk add --no-cache nginx
# Same idea on Alpine/OpenRC: keep the distro nginx service disabled.
run rc-update del nginx 2>/dev/null || true
else
die "no apt or apk available — install nginx manually or re-run with --skip-nginx-install"
fi
fi
# Warn (don't fail) — the webapp nginx config references mime.types.
[[ -f /etc/nginx/mime.types ]] || warn "/etc/nginx/mime.types missing; webapp-nginx.conf may fail"
# ----------------------------------------------------------------------
# 3. Postgres role + DB
# ----------------------------------------------------------------------
if ! command -v psql >/dev/null 2>&1; then
die "psql not on PATH — install Postgres on this host (e.g. apt install postgresql) before running install.sh"
fi
# Use the OS `postgres` superuser for DDL.
# pg_role_exists — true when the ${APP_USER} login role already exists.
pg_role_exists() {
sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='${APP_USER}';" 2>/dev/null | grep -q 1
}
# pg_db_exists — true when the ${APP_USER} database already exists
# (role and database deliberately share the same name).
pg_db_exists() {
sudo -u postgres psql -tAc "SELECT 1 FROM pg_database WHERE datname='${APP_USER}';" 2>/dev/null | grep -q 1
}
# Existing role: either wipe it (--force-recreate-db) or leave it alone.
if pg_role_exists; then
if [[ "${FORCE_RECREATE_DB}" -eq 1 ]]; then
log "dropping existing role/DB (--force-recreate-db)"
run "sudo -u postgres psql -c 'DROP DATABASE IF EXISTS ${APP_USER};'"
run "sudo -u postgres psql -c 'DROP ROLE IF EXISTS ${APP_USER};'"
else
warn "Postgres role ${APP_USER} already exists — skipping role/DB creation. Re-run with --force-recreate-db to wipe."
fi
fi
if ! pg_role_exists; then
log "creating Postgres role ${APP_USER}"
run "sudo -u postgres psql -c \"CREATE ROLE ${APP_USER} LOGIN;\""
fi
if ! pg_db_exists; then
log "creating Postgres database ${APP_USER}"
run "sudo -u postgres psql -c \"CREATE DATABASE ${APP_USER} OWNER ${APP_USER};\""
fi
# ----------------------------------------------------------------------
# 4. Redis
# ----------------------------------------------------------------------
# Different distros name the unit differently (redis-server.service on
# Debian/Ubuntu, redis.service elsewhere). Detect whichever exists,
# start + enable it; otherwise degrade gracefully.
REDIS_UNIT=""
if systemctl list-unit-files | grep -q '^redis-server\.service'; then
REDIS_UNIT="redis-server.service"
elif systemctl list-unit-files | grep -q '^redis\.service'; then
REDIS_UNIT="redis.service"
fi
if [[ -n "${REDIS_UNIT}" ]]; then
run "systemctl start ${REDIS_UNIT} || true"
run "systemctl enable ${REDIS_UNIT} >/dev/null 2>&1 || true"
elif command -v redis-cli >/dev/null 2>&1; then
warn "redis-cli present but no redis-server.service / redis.service unit — assuming external Redis"
else
warn "redis not detected; orchestrator will fall back to in-process event bus. Install redis for multi-replica support."
fi
# ----------------------------------------------------------------------
# 5. orchestrator.env
# ----------------------------------------------------------------------
# First-boot secret generation. This entire section runs only when
# ${ENV_FILE} does not exist yet — re-running install.sh never rotates
# keys. Generated secrets are mirrored into a root-only handoff file so
# ops can record them in a password manager and then shred it.
FIRST_KEYS_FILE="/root/currencicombo-first-keys.txt"
if [[ -f "${ENV_FILE}" ]]; then
log "${ENV_FILE} already exists — leaving alone (no new keys generated)"
else
log "writing ${ENV_FILE}"
# Seed from the tracked example; 0640 so only root and the app group can read.
install -o "${APP_USER}" -g "${APP_USER}" -m 0640 "${SCRIPT_DIR}/.env.prod.example" "${ENV_FILE}"
# Auto-fill the two REQUIRED secrets so first boot doesn't crash.
SECRET="$(openssl rand -hex 32)"        # EVENT_SIGNING_SECRET (64 hex chars)
INIT_KEY="$(openssl rand -hex 24)"      # initiator API key
SETT_KEY="$(openssl rand -hex 24)"      # settler API key
AUD_KEY="$(openssl rand -hex 24)"       # auditor API key
DB_PASSWORD="$(openssl rand -hex 24)"   # local Postgres password for ${APP_USER}
# NOTE(review): sql_escape is defined earlier in this script — presumably it
# escapes single quotes for the SQL string literal below; confirm. Hex input
# makes it a no-op in practice.
DB_PASSWORD_SQL="$(sql_escape "${DB_PASSWORD}")"
API_KEYS_VALUE="${INIT_KEY}:initiator,${SETT_KEY}:settler,${AUD_KEY}:auditor"
DATABASE_URL="postgresql://${APP_USER}:${DB_PASSWORD}@127.0.0.1:5432/${APP_USER}"
log "setting Postgres password for role ${APP_USER}"
run "sudo -u postgres psql -c \"ALTER ROLE ${APP_USER} WITH LOGIN PASSWORD '${DB_PASSWORD_SQL}';\""
# Patch the seeded env file in place. `|` is the sed delimiter because
# DATABASE_URL contains `/`; all generated values are hex/`:`/`,` only, so
# no further escaping is needed.
run "sed -i 's|^EVENT_SIGNING_SECRET=.*|EVENT_SIGNING_SECRET=${SECRET}|' '${ENV_FILE}'"
run "sed -i 's|^API_KEYS=.*|API_KEYS=${API_KEYS_VALUE}|' '${ENV_FILE}'"
run "sed -i 's|^DATABASE_URL=.*|DATABASE_URL=${DATABASE_URL}|' '${ENV_FILE}'"
# Upsert ORCHESTRATOR_API_KEYS: replace the line if the key exists in the
# example file, otherwise append it.
run "grep -q '^ORCHESTRATOR_API_KEYS=' '${ENV_FILE}' && sed -i 's|^ORCHESTRATOR_API_KEYS=.*|ORCHESTRATOR_API_KEYS=${API_KEYS_VALUE}|' '${ENV_FILE}' || printf '\nORCHESTRATOR_API_KEYS=%s\n' '${API_KEYS_VALUE}' >> '${ENV_FILE}'"
# Write a root-only handoff file so ops can grab the keys without
# scraping journald or reading the env file. The canonical copy lives
# in ${ENV_FILE}; delete this file once the keys are in your password
# manager.
if [[ "${DRY_RUN}" -eq 0 ]]; then
# NOTE(review): umask persists for the remainder of this shell — any file
# created later in the script will also be 0600/0700 unless chmod'd.
umask 077
cat > "${FIRST_KEYS_FILE}" <<EOF
# CurrenciCombo first-deploy secrets — generated $(date -Iseconds) by install.sh
#
# This file contains the initial API keys and event-signing secret for the
# orchestrator. The canonical live values live in ${ENV_FILE} and are what
# systemd actually loads. This file is a root-only handoff copy — record
# these values in your password manager, then:
#
# shred -u ${FIRST_KEYS_FILE}
#
# Re-running install.sh does NOT regenerate these values if ${ENV_FILE}
# already exists. Losing both ${FIRST_KEYS_FILE} and ${ENV_FILE} means
# rotating all three API keys and the signing secret.
EVENT_SIGNING_SECRET=${SECRET}
ORCHESTRATOR_API_KEY_INITIATOR=${INIT_KEY}
ORCHESTRATOR_API_KEY_SETTLER=${SETT_KEY}
ORCHESTRATOR_API_KEY_AUDITOR=${AUD_KEY}
DATABASE_URL=${DATABASE_URL}
# As it appears in ${ENV_FILE}:
API_KEYS=${API_KEYS_VALUE}
ORCHESTRATOR_API_KEYS=${API_KEYS_VALUE}
EOF
chmod 0600 "${FIRST_KEYS_FILE}"
chown root:root "${FIRST_KEYS_FILE}"
else
log "[dry-run] would write ${FIRST_KEYS_FILE} (0600, root:root)"
fi
log " generated EVENT_SIGNING_SECRET (64 hex)"
log " generated 3 API keys (initiator/settler/auditor)"
log " generated local Postgres password for ${APP_USER}"
log " initial secrets written to ${FIRST_KEYS_FILE} (0600) — record in password manager, then 'shred -u ${FIRST_KEYS_FILE}'"
fi
# ----------------------------------------------------------------------
# 6. webapp-nginx.conf
# ----------------------------------------------------------------------
run install -o "${APP_USER}" -g "${APP_USER}" -m 0644 "${SCRIPT_DIR}/webapp-nginx.conf" "${NGINX_FILE}"
# ----------------------------------------------------------------------
# 7. systemd units
# ----------------------------------------------------------------------
# Both units are installed root-owned, then systemd is told to re-read them.
for unit in currencicombo-orchestrator.service currencicombo-webapp.service; do
  run install -o root -g root -m 0644 "${SCRIPT_DIR}/systemd/${unit}" "${SYSTEMD_DIR}/${unit}"
done
run systemctl daemon-reload
# ----------------------------------------------------------------------
# 8. Enable (but do NOT start yet — no build exists)
# ----------------------------------------------------------------------
# The deploy script starts the services once a build exists under /opt.
for unit in currencicombo-orchestrator.service currencicombo-webapp.service; do
  run systemctl enable "${unit}"
done
log "install complete."
log " next: run scripts/deployment/deploy-currencicombo-8604.sh as root to build + start."

View File

@@ -0,0 +1,34 @@
# CurrenciCombo orchestrator — Node service loaded from
# /opt/currencicombo/orchestrator/dist. Installed to the systemd dir by
# scripts/deployment/install.sh; started later by the deploy script.
[Unit]
Description=CurrenciCombo orchestrator (Node)
Documentation=https://gitea.d-bis.org/d-bis/CurrenciCombo
# Both redis-server.service (Debian) and redis.service are listed because
# install.sh accepts either unit name; ordering after a unit that does not
# exist on this host is ignored by systemd.
After=network-online.target postgresql.service redis-server.service redis.service
Wants=network-online.target
[Service]
Type=simple
User=currencicombo
Group=currencicombo
WorkingDirectory=/opt/currencicombo/orchestrator
# install.sh generates this file (EVENT_SIGNING_SECRET, API keys,
# DATABASE_URL); the service cannot start without it.
EnvironmentFile=/etc/currencicombo/orchestrator.env
ExecStart=/usr/bin/node /opt/currencicombo/orchestrator/dist/index.js
Restart=on-failure
RestartSec=5
TimeoutStopSec=20
StandardOutput=journal
StandardError=journal
SyslogIdentifier=currencicombo-orchestrator
# Hardening
NoNewPrivileges=yes
PrivateTmp=yes
# strict: whole FS read-only for the service except the paths listed below.
ProtectSystem=strict
ProtectHome=yes
ReadWritePaths=/var/log/currencicombo
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictSUIDSGID=yes
LockPersonality=yes
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,34 @@
# CurrenciCombo webapp — static Vite bundle served by an unprivileged,
# foreground nginx using the self-contained config in
# /etc/currencicombo/webapp-nginx.conf (listens on :3000).
[Unit]
Description=CurrenciCombo webapp (Vite SPA served by nginx)
Documentation=https://gitea.d-bis.org/d-bis/CurrenciCombo
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=currencicombo
Group=currencicombo
# systemd creates /run/currencicombo-webapp fresh on each start; it holds
# the nginx pid file (see -g below) so the unprivileged master never tries
# to write the default /run/nginx.pid.
RuntimeDirectory=currencicombo-webapp
RuntimeDirectoryMode=0755
ExecStart=/usr/sbin/nginx -c /etc/currencicombo/webapp-nginx.conf -g 'daemon off; pid /run/currencicombo-webapp/nginx.pid;'
ExecReload=/usr/sbin/nginx -c /etc/currencicombo/webapp-nginx.conf -s reload
Restart=on-failure
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=currencicombo-webapp
# Hardening
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ProtectHome=yes
ReadWritePaths=/var/log/currencicombo /run/currencicombo-webapp
# NOTE(review): nginx's default client_body/proxy temp dirs (distro
# dependent, e.g. /var/lib/nginx) are read-only under ProtectSystem=strict.
# This works only while those dirs pre-exist and are never written (static
# serving, 1m body cap) — confirm on the target distro or add the temp
# dirs to ReadWritePaths.
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictSUIDSGID=yes
LockPersonality=yes
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,80 @@
# Self-contained nginx.conf for the CurrenciCombo Vite SPA.
# Invoked by the `currencicombo-webapp.service` systemd unit and installed
# to /etc/currencicombo/webapp-nginx.conf by scripts/deployment/install.sh.
#
# Listens on :3000 (NPMplus upstream). NPMplus path-routes /api/* to the
# orchestrator on :8080 (with SSE-friendly settings — see README.md);
# everything else lands here.
# This config does NOT proxy /api itself — that's intentional so a wrong
# NPMplus rule fails loudly instead of silently bypassing the orchestrator.
worker_processes auto;
error_log /var/log/currencicombo/webapp-nginx.error.log warn;
events {
    worker_connections 1024;
}
http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    access_log /var/log/currencicombo/webapp-nginx.access.log combined;
    sendfile on;
    tcp_nopush on;
    keepalive_timeout 65;
    server_tokens off;
    gzip on;
    gzip_types text/plain text/css application/javascript application/json image/svg+xml;
    gzip_min_length 1024;
    # Uploads/bodies: the portal is a static SPA, so any request with a body
    # is almost certainly mis-routed. Cap tight.
    client_max_body_size 1m;
    server {
        listen 3000 default_server;
        listen [::]:3000 default_server;
        server_name _;
        root /opt/currencicombo/webapp/dist;
        index index.html;
        # Security headers are also set by NPMplus, but apply them here too
        # so they survive a direct-to-CT curl for debugging.
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header Referrer-Policy "strict-origin-when-cross-origin" always;
        # Immutable asset bundles (hashed filenames from the Vite build).
        location /assets/ {
            access_log off;
            expires 1y;
            add_header Cache-Control "public, max-age=31536000, immutable";
            try_files $uri =404;
        }
        # Deny sourcemaps in prod. `return` runs in the rewrite phase, so
        # it short-circuits before any access/content handling — a 404 is
        # returned unconditionally (and hides the file's existence, unlike
        # a 403 from `deny all`, which would never be reached here anyway).
        location ~ \.map$ {
            access_log off;
            return 404;
        }
        # Guard-rail: if NPMplus fails to path-route /api/*, surface it as a
        # clean 421 rather than serving index.html and confusing the browser
        # with a JSON parse error. The SSE endpoint lives at
        # /api/plans/:id/events/stream, which also sits under /api/, so one
        # rule covers both.
        location /api/ {
            # default_type (not add_header) labels the `return` body:
            # add_header cannot replace the Content-Type nginx derives from
            # the http-level default_type, so using it here would emit a
            # duplicate, conflicting Content-Type header.
            default_type text/plain;
            return 421 "NPMplus is misconfigured: /api/* must proxy to orchestrator :8080\n";
        }
        # SPA fallback. Must come last.
        location / {
            try_files $uri $uri/ /index.html;
        }
    }
}