phoenix: automate CurrenciCombo e2e deploys
All checks were successful
Deploy to Phoenix / deploy (push) Successful in 31s

This commit is contained in:
defiQUG
2026-04-22 20:05:26 -07:00
parent 4f7cda9b2f
commit 3bea587e12
3 changed files with 961 additions and 46 deletions

View File

@@ -0,0 +1,247 @@
{
"defaults": {
"timeout_sec": 1800
},
"targets": [
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "default",
"description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"phoenix-deploy-api/scripts/install-systemd.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"healthcheck": {
"url": "http://192.168.11.59:4001/health",
"expect_status": 200,
"expect_body_includes": "phoenix-deploy-api",
"attempts": 8,
"delay_ms": 3000,
"timeout_ms": 10000
}
},
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "cloudflare-sync",
"description": "Optional: sync Cloudflare DNS from repo .env (path-gated; set PHOENIX_CLOUDFLARE_SYNC=1 on host).",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/gitea-cloudflare-sync.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"timeout_sec": 600
},
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "cloudflare-sync-force",
"description": "Same as cloudflare-sync but skips path filter (operator / manual).",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/gitea-cloudflare-sync.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"timeout_sec": 600
},
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "portal-live",
"description": "Deploy the Sankofa portal to CT 7801 on Proxmox.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/sync-sankofa-portal-7801.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT",
"SANKOFA_PORTAL_SRC"
],
"healthcheck": {
"url": "http://192.168.11.51:3000/",
"expect_status": 200,
"expect_body_includes": "<html",
"attempts": 10,
"delay_ms": 5000,
"timeout_ms": 10000
}
},
{
"repo": "d-bis/CurrenciCombo",
"branch": "main",
"target": "default",
"description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT",
"PHOENIX_DEPLOY_WORKSPACE"
],
"healthcheck": {
"url": "https://curucombo.xn--vov0g.com/api/ready",
"expect_status": 200,
"expect_body_includes": "\"ready\":true",
"attempts": 12,
"delay_ms": 5000,
"timeout_ms": 15000
}
},
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "atomic-swap-dapp-live",
"description": "Deploy the Atomic Swap dApp to VMID 5801 on Proxmox.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/deploy-atomic-swap-dapp-5801.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"healthcheck": {
"url": "https://atomic-swap.defi-oracle.io/data/live-route-registry.json",
"expect_status": 200,
"expect_body_includes": "\"liveBridgeRoutes\"",
"attempts": 10,
"delay_ms": 5000,
"timeout_ms": 15000
}
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "default",
"description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"phoenix-deploy-api/scripts/install-systemd.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"healthcheck": {
"url": "http://192.168.11.59:4001/health",
"expect_status": 200,
"expect_body_includes": "phoenix-deploy-api",
"attempts": 8,
"delay_ms": 3000,
"timeout_ms": 10000
}
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "atomic-swap-dapp-live",
"description": "Deploy the Atomic Swap dApp to VMID 5801 on Proxmox.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/deploy-atomic-swap-dapp-5801.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"healthcheck": {
"url": "https://atomic-swap.defi-oracle.io/data/live-route-registry.json",
"expect_status": 200,
"expect_body_includes": "\"liveBridgeRoutes\"",
"attempts": 10,
"delay_ms": 5000,
"timeout_ms": 15000
}
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "cloudflare-sync",
"description": "Optional: sync Cloudflare DNS from repo .env (path-gated; set PHOENIX_CLOUDFLARE_SYNC=1 on host).",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/gitea-cloudflare-sync.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"timeout_sec": 600
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "cloudflare-sync-force",
"description": "Same as cloudflare-sync but skips path filter (operator / manual).",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/gitea-cloudflare-sync.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"timeout_sec": 600
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "portal-live",
"description": "Deploy the Sankofa portal to CT 7801 on Proxmox.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/sync-sankofa-portal-7801.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT",
"SANKOFA_PORTAL_SRC"
],
"healthcheck": {
"url": "http://192.168.11.51:3000/",
"expect_status": 200,
"expect_body_includes": "<html",
"attempts": 10,
"delay_ms": 5000,
"timeout_ms": 10000
}
},
{
"repo": "d-bis/CurrenciCombo",
"branch": "master",
"target": "default",
"description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT",
"PHOENIX_DEPLOY_WORKSPACE"
],
"healthcheck": {
"url": "https://curucombo.xn--vov0g.com/api/ready",
"expect_status": 200,
"expect_body_includes": "\"ready\":true",
"attempts": 12,
"delay_ms": 5000,
"timeout_ms": 15000
}
}
]
}

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env node
/**
* Phoenix Deploy API — Gitea webhook receiver, deploy stub, and Phoenix API Railing (Infra/VE)
* Phoenix Deploy API — Gitea webhook receiver, deploy execution API, and Phoenix API Railing (Infra/VE)
*
* Endpoints:
* POST /webhook/gitea — Receives Gitea push/tag/PR webhooks
@@ -19,7 +19,9 @@
import crypto from 'crypto';
import https from 'https';
import path from 'path';
import { readFileSync, existsSync } from 'fs';
import { promisify } from 'util';
import { execFile as execFileCallback } from 'child_process';
import { cpSync, existsSync, mkdirSync, mkdtempSync, readFileSync, readdirSync, rmSync, writeFileSync } from 'fs';
import { fileURLToPath } from 'url';
import express from 'express';
@@ -29,6 +31,13 @@ const PORT = parseInt(process.env.PORT || '4001', 10);
const GITEA_URL = (process.env.GITEA_URL || 'https://gitea.d-bis.org').replace(/\/$/, '');
const GITEA_TOKEN = process.env.GITEA_TOKEN || '';
const WEBHOOK_SECRET = process.env.PHOENIX_DEPLOY_SECRET || '';
const PHOENIX_REPO_ROOT_DEFAULT = (process.env.PHOENIX_REPO_ROOT_DEFAULT || '/srv/projects/proxmox').trim();
const ATOMIC_SWAP_REPO = (process.env.PHOENIX_ATOMIC_SWAP_REPO || 'd-bis/atomic-swap-dapp').trim();
const ATOMIC_SWAP_REF = (process.env.PHOENIX_ATOMIC_SWAP_REF || 'main').trim();
const CROSS_CHAIN_PMM_LPS_REPO = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REPO || '').trim();
const CROSS_CHAIN_PMM_LPS_REF = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REF || 'main').trim();
const SMOM_DBIS_138_REPO = (process.env.PHOENIX_SMOM_DBIS_138_REPO || '').trim();
const SMOM_DBIS_138_REF = (process.env.PHOENIX_SMOM_DBIS_138_REF || 'main').trim();
const PROXMOX_HOST = process.env.PROXMOX_HOST || '';
const PROXMOX_PORT = parseInt(process.env.PROXMOX_PORT || '8006', 10);
@@ -42,6 +51,17 @@ const PROMETHEUS_URL = (process.env.PROMETHEUS_URL || 'http://localhost:9090').r
const PHOENIX_WEBHOOK_URL = process.env.PHOENIX_WEBHOOK_URL || '';
const PHOENIX_WEBHOOK_SECRET = process.env.PHOENIX_WEBHOOK_SECRET || '';
const PARTNER_KEYS = (process.env.PHOENIX_PARTNER_KEYS || '').split(',').map((k) => k.trim()).filter(Boolean);
const WEBHOOK_DEPLOY_ENABLED = process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === '1' || process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === 'true';
const execFile = promisify(execFileCallback);
// Substitute ${VAR} tokens in a string with values from the given env map.
// Non-string inputs pass through untouched; unknown vars expand to ''.
function expandEnvTokens(value, env = process.env) {
  if (typeof value !== 'string') return value;
  return value.replace(/\$\{([A-Z0-9_]+)\}/gi, (fullMatch, varName) => {
    const resolved = env[varName];
    return resolved || '';
  });
}
// Resolve the proxmox repo root: env override first, then the baked-in
// default. The trailing slash is stripped so path.join callers stay clean.
function resolvePhoenixRepoRoot() {
  const configured = process.env.PHOENIX_REPO_ROOT || PHOENIX_REPO_ROOT_DEFAULT || '';
  return configured.trim().replace(/\/$/, '');
}
/**
* Manifest resolution order:
@@ -63,15 +83,395 @@ function resolvePublicSectorManifestPath() {
return path.join(__dirname, '..', 'config', 'public-sector-program-manifest.json');
}
/**
 * Locate the deploy-targets config file.
 * Order: DEPLOY_TARGETS_PATH env override (used only when it exists on
 * disk), then the deploy-targets.json bundled next to this module. The
 * bundled path is returned unconditionally — loadDeployTargetsConfig
 * treats a missing file as an empty target list, so no existence check
 * is needed here (the original checked existsSync(bundled) and returned
 * the same value on both branches).
 */
function resolveDeployTargetsPath() {
  const override = (process.env.DEPLOY_TARGETS_PATH || '').trim();
  if (override && existsSync(override)) return override;
  return path.join(__dirname, 'deploy-targets.json');
}
// Read and parse the deploy-targets config. A missing file yields an empty
// config rather than an error; a present-but-invalid JSON file still throws.
function loadDeployTargetsConfig() {
  const configPath = resolveDeployTargetsPath();
  if (!existsSync(configPath)) {
    return { path: configPath, defaults: {}, targets: [] };
  }
  const parsed = JSON.parse(readFileSync(configPath, 'utf8'));
  const targets = Array.isArray(parsed.targets) ? parsed.targets : [];
  return {
    path: configPath,
    defaults: parsed.defaults || {},
    targets,
  };
}
// Look up the configured deploy entry for (repo, branch, target).
// Entries default to branch "main" and target "default" when unspecified;
// match is undefined when nothing in the config applies.
function findDeployTarget(repo, branch, requestedTarget) {
  const config = loadDeployTargetsConfig();
  const wantedTarget = requestedTarget || 'default';
  let match;
  for (const entry of config.targets) {
    const entryBranch = entry.branch || 'main';
    const entryTarget = entry.target || 'default';
    if (entry.repo === repo && entryBranch === branch && entryTarget === wantedTarget) {
      match = entry;
      break;
    }
  }
  return { config, match, wantedTarget };
}
// Promise-based delay helper used between health-check retries.
function sleep(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
/**
 * Poll a health-check URL until it returns the expected status (and body
 * substring, when configured), retrying up to `attempts` times with
 * `delay_ms` between tries. Resolves null when no healthcheck/url is
 * configured; throws with the last error after the final failed attempt.
 *
 * Fix vs. original: clearTimeout(timeout) only ran on the success path, so
 * a failed fetch or body read leaked the abort timer each attempt. The
 * timer is now cleared in a finally block regardless of outcome.
 */
async function verifyHealthCheck(healthcheck) {
  if (!healthcheck || !healthcheck.url) return null;
  const attempts = Math.max(1, Number(healthcheck.attempts || 1));
  const delayMs = Math.max(0, Number(healthcheck.delay_ms || 0));
  const timeoutMs = Math.max(1000, Number(healthcheck.timeout_ms || 10000));
  const expectedStatus = Number(healthcheck.expect_status || 200);
  const expectBodyIncludes = healthcheck.expect_body_includes || '';
  let lastError = null;
  for (let attempt = 1; attempt <= attempts; attempt += 1) {
    // Abort the fetch (and body read) if it exceeds the per-attempt budget.
    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), timeoutMs);
    try {
      const res = await fetch(healthcheck.url, { signal: controller.signal });
      const body = await res.text();
      if (res.status !== expectedStatus) {
        throw new Error(`Expected HTTP ${expectedStatus}, got ${res.status}`);
      }
      if (expectBodyIncludes && !body.includes(expectBodyIncludes)) {
        throw new Error(`Health body missing expected text: ${expectBodyIncludes}`);
      }
      return {
        ok: true,
        url: healthcheck.url,
        status: res.status,
        attempt,
      };
    } catch (err) {
      lastError = err;
      if (attempt < attempts && delayMs > 0) {
        await sleep(delayMs);
      }
    } finally {
      clearTimeout(timeout);
    }
  }
  throw new Error(`Health check failed for ${healthcheck.url}: ${lastError?.message || 'unknown error'}`);
}
// Fetch a tar.gz snapshot of owner/repo at ref from Gitea and write it to
// archivePath. A token auth header is sent only when authToken is provided.
async function downloadRepoArchive({ owner, repo, ref, archivePath, authToken }) {
  const url = `${GITEA_URL}/api/v1/repos/${owner}/${repo}/archive/${ref}.tar.gz`;
  const headers = authToken ? { Authorization: `token ${authToken}` } : {};
  const res = await fetch(url, { headers });
  if (!res.ok) {
    throw new Error(`Failed to download archive ${owner}/${repo}@${ref}: HTTP ${res.status}`);
  }
  writeFileSync(archivePath, Buffer.from(await res.arrayBuffer()));
}
// Mirror the given top-level entries (or everything, when entries is not an
// array) from sourceRoot into destRoot. Each destination entry is removed
// first so the copy fully replaces stale content.
function syncExtractedTree({ sourceRoot, destRoot, entries = null }) {
  mkdirSync(destRoot, { recursive: true });
  let names;
  if (Array.isArray(entries)) {
    names = entries;
  } else {
    names = readdirSync(sourceRoot);
  }
  names.forEach((name) => {
    const from = path.join(sourceRoot, name);
    if (!existsSync(from)) return;
    const to = path.join(destRoot, name);
    rmSync(to, { recursive: true, force: true });
    cpSync(from, to, { recursive: true });
  });
}
// Download owner/repo@ref as a tarball, extract it into a scratch dir, and
// copy the result into destRoot (optionally only the named top-level
// entries). The scratch dir is always removed, success or failure.
async function syncRepoArchive({ owner, repo, ref, destRoot, entries = null, authToken = '' }) {
  const tempDir = mkdtempSync('/tmp/phoenix-archive-');
  const archivePath = path.join(tempDir, 'repo.tar.gz');
  const extractDir = path.join(tempDir, 'extract');
  mkdirSync(extractDir, { recursive: true });
  try {
    await downloadRepoArchive({ owner, repo, ref, archivePath, authToken });
    // Shell out to tar; execFile is the promisified child_process.execFile.
    await execFile('tar', ['-xzf', archivePath, '-C', extractDir]);
    // Gitea archives wrap everything in a single top-level directory.
    const [rootDir] = readdirSync(extractDir);
    if (!rootDir) {
      throw new Error(`Archive for ${owner}/${repo}@${ref} was empty`);
    }
    syncExtractedTree({
      sourceRoot: path.join(extractDir, rootDir),
      destRoot,
      entries,
    });
  } finally {
    rmSync(tempDir, { recursive: true, force: true });
  }
}
/**
 * Stage the working tree(s) a deploy target runs against and return the env
 * overrides (PHOENIX_REPO_ROOT, PROXMOX_REPO_ROOT, PHOENIX_DEPLOY_WORKSPACE)
 * the deploy command will receive.
 *
 * sha of 'HEAD' or 'local' skips the Gitea archive sync entirely and points
 * the deploy at the already-staged local workspace (manual smoke tests).
 * d-bis/proxmox syncs selected top-level entries straight into the repo
 * root; every other repo syncs into an isolated workspace under
 * .phoenix-deploy-workspaces/<owner>/<repo>.
 */
async function prepareDeployWorkspace({ repo, branch, sha, target }) {
  const repoRoot = resolvePhoenixRepoRoot();
  if (!repoRoot) {
    throw new Error('PHOENIX_REPO_ROOT is not configured');
  }
  // "owner/name" or bare name (bare defaults to the d-bis org).
  const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
  const externalWorkspaceRoot = path.join(repoRoot, '.phoenix-deploy-workspaces', owner, repoName);
  // Manual smoke tests can target the already-staged local workspace without
  // forcing an archive sync from Gitea.
  if (sha === 'HEAD' || sha === 'local') {
    mkdirSync(repoRoot, { recursive: true });
    if (repo !== 'd-bis/proxmox') {
      mkdirSync(externalWorkspaceRoot, { recursive: true });
    }
    return {
      PHOENIX_REPO_ROOT: repoRoot,
      PROXMOX_REPO_ROOT: repoRoot,
      PHOENIX_DEPLOY_WORKSPACE: repo === 'd-bis/proxmox' ? repoRoot : externalWorkspaceRoot,
    };
  }
  // Prefer the exact commit; fall back to the branch name, then 'main'.
  const ref = sha || branch || 'main';
  if (repo === 'd-bis/proxmox') {
    await syncRepoArchive({
      owner,
      repo: repoName,
      ref,
      destRoot: repoRoot,
      entries: ['config', 'phoenix-deploy-api', 'reports', 'scripts', 'token-lists'],
      authToken: GITEA_TOKEN,
    });
  } else {
    await syncRepoArchive({
      owner,
      repo: repoName,
      ref,
      destRoot: externalWorkspaceRoot,
      authToken: GITEA_TOKEN,
    });
  }
  // The atomic-swap target also stages companion repos next to the proxmox
  // checkout; the optional ones are synced only when configured via env.
  if (repo === 'd-bis/proxmox' && target === 'atomic-swap-dapp-live') {
    const [swapOwner, swapRepo] = ATOMIC_SWAP_REPO.includes('/')
      ? ATOMIC_SWAP_REPO.split('/')
      : ['d-bis', ATOMIC_SWAP_REPO];
    await syncRepoArchive({
      owner: swapOwner,
      repo: swapRepo,
      ref: ATOMIC_SWAP_REF,
      destRoot: path.join(repoRoot, 'atomic-swap-dapp'),
      authToken: GITEA_TOKEN,
    });
    if (CROSS_CHAIN_PMM_LPS_REPO) {
      const [lpsOwner, lpsRepo] = CROSS_CHAIN_PMM_LPS_REPO.includes('/')
        ? CROSS_CHAIN_PMM_LPS_REPO.split('/')
        : ['d-bis', CROSS_CHAIN_PMM_LPS_REPO];
      await syncRepoArchive({
        owner: lpsOwner,
        repo: lpsRepo,
        ref: CROSS_CHAIN_PMM_LPS_REF,
        destRoot: path.join(repoRoot, 'cross-chain-pmm-lps'),
        authToken: GITEA_TOKEN,
      });
    }
    if (SMOM_DBIS_138_REPO) {
      const [smomOwner, smomRepo] = SMOM_DBIS_138_REPO.includes('/')
        ? SMOM_DBIS_138_REPO.split('/')
        : ['d-bis', SMOM_DBIS_138_REPO];
      await syncRepoArchive({
        owner: smomOwner,
        repo: smomRepo,
        ref: SMOM_DBIS_138_REF,
        destRoot: path.join(repoRoot, 'smom-dbis-138'),
        authToken: GITEA_TOKEN,
      });
    }
  }
  return {
    PHOENIX_REPO_ROOT: repoRoot,
    PROXMOX_REPO_ROOT: repoRoot,
    PHOENIX_DEPLOY_WORKSPACE: repo === 'd-bis/proxmox' ? repoRoot : externalWorkspaceRoot,
  };
}
/**
 * Execute one configured deploy target: validate required env, run the
 * command with a timeout, then run the optional health check.
 *
 * @param definition     one entry from deploy-targets.json
 * @param configDefaults the config's "defaults" object (fallback cwd,
 *                       timeout_sec, healthcheck)
 * @param context        { repo, branch, sha, target, trigger } — exported
 *                       to the child as PHOENIX_DEPLOY_* env vars
 * @param envOverrides   workspace paths from prepareDeployWorkspace
 * @returns { cwd, command, stdout, stderr, timeout_sec, healthcheck }
 * @throws on missing command/env, missing cwd, non-zero exit, timeout, or
 *         failed health check
 */
async function runDeployTarget(definition, configDefaults, context, envOverrides = {}) {
  if (!Array.isArray(definition.command) || definition.command.length === 0) {
    throw new Error('Deploy target is missing a command array');
  }
  // Child env: process env + workspace overrides + deploy context.
  const childEnv = {
    ...process.env,
    ...envOverrides,
    PHOENIX_DEPLOY_REPO: context.repo,
    PHOENIX_DEPLOY_BRANCH: context.branch,
    PHOENIX_DEPLOY_SHA: context.sha || '',
    PHOENIX_DEPLOY_TARGET: context.target,
    PHOENIX_DEPLOY_TRIGGER: context.trigger,
  };
  // ${VAR} tokens in cwd and command parts are expanded against childEnv.
  const cwd = expandEnvTokens(definition.cwd || configDefaults.cwd || process.cwd(), childEnv);
  const timeoutSeconds = Number(definition.timeout_sec || configDefaults.timeout_sec || 1800);
  const timeout = Number.isFinite(timeoutSeconds) && timeoutSeconds > 0 ? timeoutSeconds * 1000 : 1800 * 1000;
  const command = definition.command.map((part) => expandEnvTokens(part, childEnv));
  // required_env is checked against the merged env, so overrides count.
  const missingEnv = (definition.required_env || []).filter((key) => !childEnv[key]);
  if (missingEnv.length > 0) {
    throw new Error(`Missing required env for deploy target: ${missingEnv.join(', ')}`);
  }
  if (!existsSync(cwd)) {
    throw new Error(`Deploy working directory does not exist: ${cwd}`);
  }
  const { stdout, stderr } = await execFile(command[0], command.slice(1), {
    cwd,
    env: childEnv,
    timeout,
    maxBuffer: 10 * 1024 * 1024,
  });
  // Target-level healthcheck wins over the config default; null skips it.
  const healthcheck = await verifyHealthCheck(definition.healthcheck || configDefaults.healthcheck || null);
  return {
    cwd,
    command,
    stdout: stdout || '',
    stderr: stderr || '',
    timeout_sec: timeoutSeconds,
    healthcheck,
  };
}
/**
 * End-to-end deploy driver shared by the webhook and /api/deploy routes:
 * resolve the configured target, report pending/success/failure commit
 * statuses to Gitea (when a sha and token are available), stage the
 * workspace, run the target, and always emit an outbound deploy.completed
 * webhook when PHOENIX_WEBHOOK_URL is set.
 *
 * Errors are thrown with .statusCode and .payload attached so HTTP
 * handlers can translate them directly into responses.
 */
async function executeDeploy({ repo, branch = 'main', target = 'default', sha = '', trigger = 'api' }) {
  if (!repo) {
    const error = new Error('repo required');
    error.statusCode = 400;
    error.payload = { error: error.message };
    throw error;
  }
  // "owner/name" or bare name (bare defaults to the d-bis org).
  const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
  const commitSha = sha || '';
  const requestedTarget = target || 'default';
  const { config, match, wantedTarget } = findDeployTarget(repo, branch, requestedTarget);
  if (!match) {
    const error = new Error('Deploy target not configured');
    error.statusCode = 404;
    error.payload = {
      error: error.message,
      repo,
      branch,
      target: wantedTarget,
      config_path: config.path,
    };
    if (commitSha && GITEA_TOKEN) {
      await setGiteaCommitStatus(owner, repoName, commitSha, 'failure', `No deploy target for ${repo} ${branch} ${wantedTarget}`);
    }
    throw error;
  }
  if (commitSha && GITEA_TOKEN) {
    await setGiteaCommitStatus(owner, repoName, commitSha, 'pending', 'Phoenix deployment in progress');
  }
  console.log(`[deploy] ${repo} branch=${branch} target=${wantedTarget} sha=${commitSha} trigger=${trigger}`);
  // Tracked so the finally-block webhook can report success/failure.
  let deployResult = null;
  let deployError = null;
  let envOverrides = {};
  try {
    envOverrides = await prepareDeployWorkspace({
      repo,
      branch,
      sha: commitSha,
      target: wantedTarget,
    });
    deployResult = await runDeployTarget(match, config.defaults, {
      repo,
      branch,
      sha: commitSha,
      target: wantedTarget,
      trigger,
    }, envOverrides);
    if (commitSha && GITEA_TOKEN) {
      await setGiteaCommitStatus(owner, repoName, commitSha, 'success', `Deployed to ${wantedTarget}`);
    }
    return {
      status: 'completed',
      repo,
      branch,
      target: wantedTarget,
      config_path: config.path,
      command: deployResult.command,
      cwd: deployResult.cwd,
      stdout: deployResult.stdout,
      stderr: deployResult.stderr,
      healthcheck: deployResult.healthcheck,
    };
  } catch (err) {
    deployError = err;
    if (commitSha && GITEA_TOKEN) {
      // Commit-status descriptions are length-limited; truncate the message.
      await setGiteaCommitStatus(owner, repoName, commitSha, 'failure', `Deploy failed: ${err.message.slice(0, 120)}`);
    }
    err.statusCode = err.statusCode || 500;
    err.payload = err.payload || {
      error: err.message,
      repo,
      branch,
      target: wantedTarget,
      config_path: config.path,
    };
    throw err;
  } finally {
    // Best-effort outbound notification on both success and failure;
    // fire-and-forget so a webhook outage cannot fail the deploy response.
    if (PHOENIX_WEBHOOK_URL) {
      const payload = {
        event: 'deploy.completed',
        repo,
        branch,
        target: wantedTarget,
        sha: commitSha,
        success: Boolean(deployResult),
        command: deployResult?.command,
        cwd: deployResult?.cwd,
        phoenix_repo_root: envOverrides.PHOENIX_REPO_ROOT || null,
        error: deployError?.message || null,
      };
      const body = JSON.stringify(payload);
      // Payload is HMAC-signed so receivers can authenticate it.
      const sig = crypto.createHmac('sha256', PHOENIX_WEBHOOK_SECRET || '').update(body).digest('hex');
      fetch(PHOENIX_WEBHOOK_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json', 'X-Phoenix-Signature': `sha256=${sig}` },
        body,
      }).catch((e) => console.error('[webhook] outbound failed', e.message));
    }
  }
}
const httpsAgent = new https.Agent({ rejectUnauthorized: process.env.PROXMOX_TLS_VERIFY !== '0' });
// Build a Proxmox PVEAPIToken auth header. Token names that already embed
// the user ("user@realm!name") are used as-is; bare names get the user
// prefixed.
function formatProxmoxAuthHeader(user, tokenName, tokenValue) {
  const qualified = tokenName.includes('!')
    ? tokenName
    : `${user}!${tokenName}`;
  return `PVEAPIToken=${qualified}=${tokenValue}`;
}
async function proxmoxRequest(endpoint, method = 'GET', body = null) {
const baseUrl = `https://${PROXMOX_HOST}:${PROXMOX_PORT}/api2/json`;
const url = `${baseUrl}${endpoint}`;
const options = {
method,
headers: {
Authorization: `PVEAPIToken=${PROXMOX_USER}!${PROXMOX_TOKEN_NAME}=${PROXMOX_TOKEN_VALUE}`,
Authorization: formatProxmoxAuthHeader(PROXMOX_USER, PROXMOX_TOKEN_NAME, PROXMOX_TOKEN_VALUE),
'Content-Type': 'application/json',
},
agent: httpsAgent,
@@ -162,12 +562,44 @@ app.post('/webhook/gitea', async (req, res) => {
if (action === 'push' || (action === 'synchronize' && payload.pull_request)) {
if (branch === 'main' || branch === 'master' || ref.startsWith('refs/tags/')) {
if (sha && GITEA_TOKEN) {
await setGiteaCommitStatus(owner, repoName, sha, 'pending', 'Phoenix deployment triggered');
if (!WEBHOOK_DEPLOY_ENABLED) {
return res.status(200).json({
received: true,
repo: fullName,
branch,
sha,
deployed: false,
message: 'Webhook accepted; set PHOENIX_WEBHOOK_DEPLOY_ENABLED=1 to execute deploys from webhook events.',
});
}
try {
const result = await executeDeploy({
repo: fullName,
branch,
sha,
target: 'default',
trigger: 'webhook',
});
return res.status(200).json({
received: true,
repo: fullName,
branch,
sha,
deployed: true,
result,
});
} catch (err) {
return res.status(200).json({
received: true,
repo: fullName,
branch,
sha,
deployed: false,
error: err.message,
details: err.payload || null,
});
}
// Stub: enqueue deploy; actual implementation would call Proxmox/deploy logic
console.log(`[deploy-stub] Would deploy ${fullName} branch=${branch} sha=${sha}`);
// Stub: when full deploy runs, call setGiteaCommitStatus(owner, repoName, sha, 'success'|'failure', ...)
}
}
@@ -185,47 +617,36 @@ app.post('/api/deploy', async (req, res) => {
}
const { repo, branch = 'main', target, sha } = req.body;
if (!repo) {
return res.status(400).json({ error: 'repo required' });
try {
const result = await executeDeploy({
repo,
branch,
sha,
target,
trigger: 'api',
});
res.status(200).json(result);
} catch (err) {
res.status(err.statusCode || 500).json(err.payload || { error: err.message });
}
});
const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
const commitSha = sha || '';
if (commitSha && GITEA_TOKEN) {
await setGiteaCommitStatus(owner, repoName, commitSha, 'pending', 'Phoenix deployment in progress');
}
console.log(`[deploy] ${repo} branch=${branch} target=${target || 'default'} sha=${commitSha}`);
// Stub: no real deploy yet — report success so Gitea shows green; replace with real deploy + setGiteaCommitStatus on completion
const deploySuccess = true;
if (commitSha && GITEA_TOKEN) {
await setGiteaCommitStatus(
owner,
repoName,
commitSha,
deploySuccess ? 'success' : 'failure',
deploySuccess ? 'Deploy accepted (stub)' : 'Deploy failed (stub)'
);
}
res.status(202).json({
status: 'accepted',
repo,
branch,
target: target || 'default',
message: 'Deploy request queued (stub). Implement full deploy logic in Sankofa Phoenix API.',
app.get('/api/deploy-targets', (req, res) => {
const config = loadDeployTargetsConfig();
const targets = config.targets.map((entry) => ({
repo: entry.repo,
branch: entry.branch || 'main',
target: entry.target || 'default',
description: entry.description || '',
cwd: entry.cwd || config.defaults.cwd || '',
command: entry.command || [],
has_healthcheck: Boolean(entry.healthcheck || config.defaults.healthcheck),
}));
res.json({
config_path: config.path,
count: targets.length,
targets,
});
if (PHOENIX_WEBHOOK_URL) {
const payload = { event: 'deploy.completed', repo, branch, target: target || 'default', sha: commitSha, success: deploySuccess };
const body = JSON.stringify(payload);
const sig = crypto.createHmac('sha256', PHOENIX_WEBHOOK_SECRET || '').update(body).digest('hex');
fetch(PHOENIX_WEBHOOK_URL, {
method: 'POST',
headers: { 'Content-Type': 'application/json', 'X-Phoenix-Signature': `sha256=${sig}` },
body,
}).catch((e) => console.error('[webhook] outbound failed', e.message));
}
});
/**
@@ -474,7 +895,10 @@ app.listen(PORT, () => {
if (!GITEA_TOKEN) console.warn('GITEA_TOKEN not set — commit status updates disabled');
if (!hasProxmox) console.warn('PROXMOX_* not set — Infra/VE API returns stub data');
if (PHOENIX_WEBHOOK_URL) console.log('Outbound webhook enabled:', PHOENIX_WEBHOOK_URL);
if (WEBHOOK_DEPLOY_ENABLED) console.log('Inbound webhook deploy execution enabled');
if (PARTNER_KEYS.length > 0) console.log('Partner API key auth enabled for /api/v1/* (except GET /api/v1/public-sector/programs)');
const mpath = resolvePublicSectorManifestPath();
const dpath = resolveDeployTargetsPath();
console.log(`Public-sector manifest: ${mpath} (${existsSync(mpath) ? 'ok' : 'missing'})`);
console.log(`Deploy targets: ${dpath} (${existsSync(dpath) ? 'ok' : 'missing'})`);
});

View File

@@ -0,0 +1,244 @@
#!/usr/bin/env bash
# Deploy CurrenciCombo from a staged Phoenix workspace into CT 8604 on
# r630-01 and verify the public hostname end to end. See usage() below.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/scripts/lib/load-project-env.sh"
# IP map is optional; the defaults below cover a missing config file.
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# All of the following can be overridden from the environment.
PHOENIX_DEPLOY_WORKSPACE="${PHOENIX_DEPLOY_WORKSPACE:-}"
PROXMOX_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-root}"
VMID="${CURRENCICOMBO_PHOENIX_VMID:-8604}"
CT_IP="${IP_CURRENCICOMBO_PHOENIX:-10.160.0.14}"
CT_REPO_DIR="${CT_REPO_DIR:-/var/lib/currencicombo/repo}"
PUBLIC_URL="${PUBLIC_URL:-https://curucombo.xn--vov0g.com}"
PUBLIC_DOMAIN="${PUBLIC_DOMAIN:-curucombo.xn--vov0g.com}"
# NPMplus admin endpoint + credentials (credentials required for real runs).
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS:-192.168.11.167}:81}"
NPM_EMAIL="${NPM_EMAIL:-}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
DRY_RUN=0
# Print CLI usage to stdout.
usage() {
cat <<'USAGE'
Usage: phoenix-deploy-currencicombo-from-workspace.sh [--dry-run]
Requires:
  PHOENIX_DEPLOY_WORKSPACE  Full staged CurrenciCombo checkout prepared by phoenix-deploy-api
This script:
  1. Packs the staged repo workspace.
  2. Pushes it into CT 8604 on r630-01.
  3. Ensures host prerequisites, install.sh, prune cron, and deploy script run in-CT.
  4. Updates the public NPMplus host so /api/* preserves the full path and supports SSE.
  5. Verifies the public portal + /api/ready end to end.
USAGE
}
# Parse CLI flags (only --dry-run is supported).
while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run) DRY_RUN=1; shift ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown arg: $1" >&2; usage; exit 2 ;;
  esac
done
# Logging helpers; all diagnostics go to stderr.
log() { printf '[currencicombo-phoenix] %s\n' "$*" >&2; }
die() { printf '[currencicombo-phoenix][FATAL] %s\n' "$*" >&2; exit 1; }
# NOTE(review): run() eval's its single string argument; call sites rely on
# that for quote interpretation, so never pass untrusted input through it.
run() { if [[ "$DRY_RUN" -eq 1 ]]; then printf '[dry-run] %s\n' "$*" >&2; else eval "$*"; fi; }
need_cmd() { command -v "$1" >/dev/null 2>&1 || die "missing required command: $1"; }
# Fail fast if any required tool is absent.
for cmd in ssh scp tar curl jq mktemp; do
  need_cmd "$cmd"
done
[[ -n "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "PHOENIX_DEPLOY_WORKSPACE is required"
[[ -d "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "staged workspace missing: $PHOENIX_DEPLOY_WORKSPACE"
if [[ "$DRY_RUN" -eq 0 ]]; then
[[ -n "$NPM_EMAIL" ]] || die "NPM_EMAIL is required"
[[ -n "$NPM_PASSWORD" ]] || die "NPM_PASSWORD is required"
fi
SSH_TARGET="${PROXMOX_SSH_USER}@${PROXMOX_HOST}"
SSH_OPTS=(-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)
TMP_DIR="$(mktemp -d /tmp/currencicombo-phoenix-XXXXXX)"
ARCHIVE_PATH="${TMP_DIR}/currencicombo-workspace.tgz"
REMOTE_ARCHIVE="/tmp/$(basename "$ARCHIVE_PATH")"
CT_ARCHIVE="/root/$(basename "$ARCHIVE_PATH")"
NPM_COOKIE_JAR="${TMP_DIR}/npm-cookies.txt"
# Remove the scratch directory (archive, generated CT script, cookie jar)
# on any exit path. ${TMP_DIR:?} aborts instead of running `rm -rf` with an
# empty path; `--` guards against dash-leading names.
cleanup() {
  rm -rf -- "${TMP_DIR:?}"
}
trap cleanup EXIT
# Run a command string on the Proxmox host over SSH; with --dry-run the
# command is only printed (shell-quoted) to stderr.
ssh_remote() {
  local remote_cmd="$1"
  if [[ "$DRY_RUN" -ne 1 ]]; then
    ssh "${SSH_OPTS[@]}" "$SSH_TARGET" "$remote_cmd"
  else
    printf '[dry-run] ssh %q %q\n' "$SSH_TARGET" "$remote_cmd" >&2
  fi
}
# Copy a local script to the Proxmox host, push it into the CT with pct,
# run it via `pct exec`, then remove both copies. Goes through run()/
# ssh_remote() so --dry-run only prints the commands.
pct_exec_script() {
  local local_script="$1"
  local remote_script
  local ct_script
  remote_script="/tmp/$(basename "$local_script")"
  ct_script="/root/$(basename "$local_script")"
  run "scp ${SSH_OPTS[*]} '$local_script' '${SSH_TARGET}:${remote_script}'"
  ssh_remote "pct push ${VMID} '${remote_script}' '${ct_script}' --perms 0755 && rm -f '${remote_script}' && pct exec ${VMID} -- bash '${ct_script}' && pct exec ${VMID} -- rm -f '${ct_script}'"
}
log "packing staged workspace from ${PHOENIX_DEPLOY_WORKSPACE}"
run "tar -C '$PHOENIX_DEPLOY_WORKSPACE' --exclude='.git' --exclude='node_modules' --exclude='dist' --exclude='orchestrator/node_modules' --exclude='orchestrator/dist' -czf '$ARCHIVE_PATH' ."
log "ensuring CT ${VMID} is running on ${PROXMOX_HOST}"
ssh_remote "pct start ${VMID} >/dev/null 2>&1 || true"
log "uploading staged archive to CT ${VMID}"
run "scp ${SSH_OPTS[*]} '$ARCHIVE_PATH' '${SSH_TARGET}:${REMOTE_ARCHIVE}'"
ssh_remote "pct push ${VMID} '${REMOTE_ARCHIVE}' '${CT_ARCHIVE}' && rm -f '${REMOTE_ARCHIVE}'"
CT_SCRIPT="${TMP_DIR}/currencicombo-ct-deploy.sh"
cat > "$CT_SCRIPT" <<'EOF'
#!/usr/bin/env bash
set -euo pipefail
export DEBIAN_FRONTEND=noninteractive
ARCHIVE_PATH="__CT_ARCHIVE__"
REPO_DIR="__CT_REPO_DIR__"
need_pkg() {
dpkg -s "$1" >/dev/null 2>&1
}
apt-get update -qq
for pkg in ca-certificates curl git jq postgresql redis-server rsync build-essential; do
need_pkg "$pkg" || apt-get install -y -qq "$pkg"
done
if ! command -v node >/dev/null 2>&1 || ! node -v 2>/dev/null | grep -q '^v20\.'; then
curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
apt-get install -y -qq nodejs
fi
systemctl enable --now postgresql >/dev/null 2>&1 || true
systemctl enable --now redis-server >/dev/null 2>&1 || true
if [[ ! -f /root/currencicombo-prephoenix-archive.tgz && -d /opt/currencicombo ]]; then
tar -czf /root/currencicombo-prephoenix-archive.tgz /opt/currencicombo /etc/currencicombo 2>/dev/null || true
fi
install -d -o root -g root -m 0755 "$(dirname "$REPO_DIR")"
rm -rf "$REPO_DIR"
mkdir -p "$REPO_DIR"
tar -xzf "$ARCHIVE_PATH" -C "$REPO_DIR"
rm -f "$ARCHIVE_PATH"
bash "$REPO_DIR/scripts/deployment/install.sh"
bash "$REPO_DIR/scripts/deployment/install-prune-cron.sh"
CC_GIT_REF=local bash "$REPO_DIR/scripts/deployment/deploy-currencicombo-8604.sh"
systemctl is-active currencicombo-orchestrator.service currencicombo-webapp.service
curl -fsS http://127.0.0.1:8080/ready
curl -fsS http://127.0.0.1:3000/ >/dev/null
EOF
perl -0pi -e "s|__CT_ARCHIVE__|${CT_ARCHIVE//|/\\|}|g; s|__CT_REPO_DIR__|${CT_REPO_DIR//|/\\|}|g" "$CT_SCRIPT"
log "running install + deploy inside CT ${VMID}"
pct_exec_script "$CT_SCRIPT"
if [[ "$DRY_RUN" -eq 0 ]]; then
log "updating NPMplus proxy host for ${PUBLIC_DOMAIN}"
AUTH_JSON="$(jq -nc --arg identity "$NPM_EMAIL" --arg secret "$NPM_PASSWORD" '{identity:$identity,secret:$secret}')"
TOKEN_RESPONSE="$(curl -sk -X POST "$NPM_URL/api/tokens" -H 'Content-Type: application/json' -d "$AUTH_JSON" -c "$NPM_COOKIE_JAR")"
TOKEN="$(echo "$TOKEN_RESPONSE" | jq -r '.token // .accessToken // .access_token // .data.token // empty' 2>/dev/null)"
USE_COOKIE_AUTH=0
if [[ -z "$TOKEN" || "$TOKEN" == "null" ]]; then
if echo "$TOKEN_RESPONSE" | jq -e '.expires' >/dev/null 2>&1; then
USE_COOKIE_AUTH=1
else
die "NPMplus authentication failed"
fi
fi
npm_api() {
if [[ "$USE_COOKIE_AUTH" -eq 1 ]]; then
curl -sk -b "$NPM_COOKIE_JAR" "$@"
else
curl -sk -H "Authorization: Bearer $TOKEN" "$@"
fi
}
HOSTS_JSON="$(npm_api -X GET "$NPM_URL/api/nginx/proxy-hosts")"
HOST_ID="$(echo "$HOSTS_JSON" | jq -r --arg domain "$PUBLIC_DOMAIN" '
(if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
| map(select(.domain_names | type == "array"))
| map(select(any(.domain_names[]; . == $domain)))
| .[0].id // empty
')"
[[ -n "$HOST_ID" ]] || die "NPMplus proxy host not found for ${PUBLIC_DOMAIN}"
ADVANCED_CONFIG="$(cat <<CFG
location ^~ /api/ {
proxy_pass http://${CT_IP}:8080;
proxy_http_version 1.1;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header Connection \"\";
proxy_buffering off;
proxy_cache off;
proxy_read_timeout 24h;
proxy_send_timeout 24h;
add_header Cache-Control \"no-cache\";
}
CFG
)"
PAYLOAD="$(echo "$HOSTS_JSON" | jq -c --arg domain "$PUBLIC_DOMAIN" --arg host "$CT_IP" --arg advanced "$ADVANCED_CONFIG" '
(if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
| map(select(.domain_names | type == "array"))
| map(select(any(.domain_names[]; . == $domain)))
| .[0]
| {
domain_names,
forward_scheme: (.forward_scheme // "http"),
forward_host: $host,
forward_port: 3000,
access_list_id,
certificate_id,
ssl_forced,
caching_enabled,
block_exploits,
advanced_config: $advanced,
allow_websocket_upgrade,
http2_support,
hsts_enabled,
hsts_subdomains,
enabled
}
')"
[[ -n "$PAYLOAD" && "$PAYLOAD" != "null" ]] || die "failed to build NPMplus update payload"
UPDATE_RESPONSE="$(npm_api -X PUT "$NPM_URL/api/nginx/proxy-hosts/${HOST_ID}" -H 'Content-Type: application/json' -d "$PAYLOAD")"
echo "$UPDATE_RESPONSE" | jq -e '.id != null' >/dev/null 2>&1 || die "NPMplus proxy host update failed"
log "running public smoke checks"
HEADERS="$(curl -skI "$PUBLIC_URL/")"
echo "$HEADERS" | grep -q '^HTTP/2 200' || die "public root is not HTTP 200"
if echo "$HEADERS" | grep -qi '^x-nextjs-prerender:'; then
die "old Next.js headers still present on public root"
fi
curl -sk "$PUBLIC_URL/" | grep -F '<title>Solace Bank Group PLC — Treasury Management Portal</title>' >/dev/null || die "public title mismatch"
READY_BODY="$(curl -sk "$PUBLIC_URL/api/ready")"
echo "$READY_BODY" | grep -F '"ready":true' >/dev/null || die "public /api/ready failed"
curl -skN --max-time 5 -H 'Accept: text/event-stream' "$PUBLIC_URL/api/plans/demo-pay-014/status/stream" | grep -F '"type":"connected"' >/dev/null || die "public SSE smoke failed"
log "capturing EXT-* blocker summary"
ssh_remote "pct exec ${VMID} -- journalctl -u currencicombo-orchestrator.service -n 200 --no-pager | grep -E 'ExternalBlockers|EXT-' || true"
fi
log "CurrenciCombo Phoenix deploy completed from ${PHOENIX_DEPLOY_WORKSPACE}"