feat: add phoenix deploy and x402 service updates
This commit is contained in:
@@ -34,5 +34,8 @@ PHOENIX_PARTNER_KEYS=
|
||||
PUBLIC_SECTOR_MANIFEST_PATH=
|
||||
# Optional: proxmox repo root on host (manifest = $PHOENIX_REPO_ROOT/config/public-sector-program-manifest.json)
|
||||
PHOENIX_REPO_ROOT=/home/intlc/projects/proxmox
|
||||
# Gitea "cloudflare-sync" deploy target: allow scripts/deployment/gitea-cloudflare-sync.sh to call Cloudflare (1/true)
|
||||
#PHOENIX_CLOUDFLARE_SYNC=0
|
||||
# Optional zone for that sync: CLOUDFLARE_GITEA_SYNC_ZONE=d-bis.org
|
||||
# Optional: deploy target config file (defaults to phoenix-deploy-api/deploy-targets.json)
|
||||
DEPLOY_TARGETS_PATH=
|
||||
|
||||
@@ -71,6 +71,8 @@ curl -X POST "https://phoenix-api-host/api/deploy" \
|
||||
-d '{"repo":"d-bis/proxmox","branch":"main","sha":"abc123","target":"default"}'
|
||||
```
|
||||
|
||||
`deploy-to-phoenix.yml` also calls `target: "cloudflare-sync"`, which runs `scripts/deployment/gitea-cloudflare-sync.sh` under `PHOENIX_REPO_ROOT` (where `.env` and `PUBLIC_IP` are correct). Set **`PHOENIX_CLOUDFLARE_SYNC=1`** in phoenix or repo env before any Cloudflare API runs; the script skips when unset or when the commit does not touch Cloudflare/DNS paths. For a one-off without path filtering, use `target: "cloudflare-sync-force"`.
|
||||
|
||||
The API returns `404` when no matching deploy target exists for `{repo, branch, target}` and `500` when the target command fails.
|
||||
If a target defines `healthcheck`, the deploy is only marked successful after the post-deploy URL check passes.
|
||||
|
||||
|
||||
@@ -7,13 +7,11 @@
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "main",
|
||||
"target": "default",
|
||||
"description": "Deploy the Phoenix deploy API bundle to the dev VM on Proxmox.",
|
||||
"description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh",
|
||||
"--apply",
|
||||
"--start-ct"
|
||||
"phoenix-deploy-api/scripts/install-systemd.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
@@ -80,6 +78,29 @@
|
||||
"timeout_ms": 10000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/CurrenciCombo",
|
||||
"branch": "main",
|
||||
"target": "default",
|
||||
"description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT",
|
||||
"PHOENIX_DEPLOY_WORKSPACE"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "https://curucombo.xn--vov0g.com/api/ready",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "\"ready\":true",
|
||||
"attempts": 12,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "main",
|
||||
@@ -102,17 +123,39 @@
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/explorer-monorepo",
|
||||
"branch": "main",
|
||||
"target": "explorer-live",
|
||||
"description": "Redeploy the live explorer stack on VMID 5000 from the staged explorer-monorepo checkout.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/phoenix-deploy-explorer-live-from-workspace.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT",
|
||||
"PHOENIX_DEPLOY_WORKSPACE"
|
||||
],
|
||||
"timeout_sec": 2400,
|
||||
"healthcheck": {
|
||||
"url": "https://blockscout.defi-oracle.io/api/config/capabilities",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "\"chainId\"",
|
||||
"attempts": 12,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "master",
|
||||
"target": "default",
|
||||
"description": "Deploy the Phoenix deploy API bundle to the dev VM on Proxmox.",
|
||||
"description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh",
|
||||
"--apply",
|
||||
"--start-ct"
|
||||
"phoenix-deploy-api/scripts/install-systemd.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
@@ -200,6 +243,53 @@
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 10000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/CurrenciCombo",
|
||||
"branch": "master",
|
||||
"target": "default",
|
||||
"description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT",
|
||||
"PHOENIX_DEPLOY_WORKSPACE"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "https://curucombo.xn--vov0g.com/api/ready",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "\"ready\":true",
|
||||
"attempts": 12,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/explorer-monorepo",
|
||||
"branch": "master",
|
||||
"target": "explorer-live",
|
||||
"description": "Redeploy the live explorer stack on VMID 5000 from the staged explorer-monorepo checkout.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/phoenix-deploy-explorer-live-from-workspace.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT",
|
||||
"PHOENIX_DEPLOY_WORKSPACE"
|
||||
],
|
||||
"timeout_sec": 2400,
|
||||
"healthcheck": {
|
||||
"url": "https://blockscout.defi-oracle.io/api/config/capabilities",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "\"chainId\"",
|
||||
"attempts": 12,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -26,12 +26,18 @@ if [[ -f "$REPO_ROOT/config/public-sector-program-manifest.json" ]]; then
|
||||
else
|
||||
echo "WARN: $REPO_ROOT/config/public-sector-program-manifest.json missing — set PUBLIC_SECTOR_MANIFEST_PATH in .env"
|
||||
fi
|
||||
[ -f "$APP_DIR/.env" ] && cp "$APP_DIR/.env" "$TARGET/.env" || [ -f "$APP_DIR/.env.example" ] && cp "$APP_DIR/.env.example" "$TARGET/.env" || true
|
||||
if [[ -f "$TARGET/.env" ]]; then
|
||||
echo "Preserving existing $TARGET/.env"
|
||||
elif [[ -f "$APP_DIR/.env" ]]; then
|
||||
cp "$APP_DIR/.env" "$TARGET/.env"
|
||||
elif [[ -f "$APP_DIR/.env.example" ]]; then
|
||||
cp "$APP_DIR/.env.example" "$TARGET/.env"
|
||||
fi
|
||||
chown -R root:root "$TARGET"
|
||||
cd "$TARGET" && npm install --omit=dev
|
||||
cp "$APP_DIR/phoenix-deploy-api.service" /etc/systemd/system/
|
||||
systemctl daemon-reload
|
||||
systemctl enable phoenix-deploy-api
|
||||
systemctl start phoenix-deploy-api
|
||||
systemctl restart phoenix-deploy-api
|
||||
echo "Done. Status: $(systemctl is-active phoenix-deploy-api)"
|
||||
echo "Edit $TARGET/.env (GITEA_TOKEN, etc.) and run: systemctl restart phoenix-deploy-api"
|
||||
|
||||
@@ -21,7 +21,7 @@ import https from 'https';
|
||||
import path from 'path';
|
||||
import { promisify } from 'util';
|
||||
import { execFile as execFileCallback } from 'child_process';
|
||||
import { readFileSync, existsSync } from 'fs';
|
||||
import { cpSync, existsSync, mkdirSync, mkdtempSync, readFileSync, readdirSync, rmSync, writeFileSync } from 'fs';
|
||||
import { fileURLToPath } from 'url';
|
||||
import express from 'express';
|
||||
|
||||
@@ -31,6 +31,13 @@ const PORT = parseInt(process.env.PORT || '4001', 10);
|
||||
const GITEA_URL = (process.env.GITEA_URL || 'https://gitea.d-bis.org').replace(/\/$/, '');
|
||||
const GITEA_TOKEN = process.env.GITEA_TOKEN || '';
|
||||
const WEBHOOK_SECRET = process.env.PHOENIX_DEPLOY_SECRET || '';
|
||||
const PHOENIX_REPO_ROOT_DEFAULT = (process.env.PHOENIX_REPO_ROOT_DEFAULT || '/srv/projects/proxmox').trim();
|
||||
const ATOMIC_SWAP_REPO = (process.env.PHOENIX_ATOMIC_SWAP_REPO || 'd-bis/atomic-swap-dapp').trim();
|
||||
const ATOMIC_SWAP_REF = (process.env.PHOENIX_ATOMIC_SWAP_REF || 'main').trim();
|
||||
const CROSS_CHAIN_PMM_LPS_REPO = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REPO || '').trim();
|
||||
const CROSS_CHAIN_PMM_LPS_REF = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REF || 'main').trim();
|
||||
const SMOM_DBIS_138_REPO = (process.env.PHOENIX_SMOM_DBIS_138_REPO || '').trim();
|
||||
const SMOM_DBIS_138_REF = (process.env.PHOENIX_SMOM_DBIS_138_REF || 'main').trim();
|
||||
|
||||
const PROXMOX_HOST = process.env.PROXMOX_HOST || '';
|
||||
const PROXMOX_PORT = parseInt(process.env.PROXMOX_PORT || '8006', 10);
|
||||
@@ -47,9 +54,13 @@ const PARTNER_KEYS = (process.env.PHOENIX_PARTNER_KEYS || '').split(',').map((k)
|
||||
const WEBHOOK_DEPLOY_ENABLED = process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === '1' || process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === 'true';
|
||||
const execFile = promisify(execFileCallback);
|
||||
|
||||
/**
 * Expand `${VAR}` tokens in a string using the given environment map.
 *
 * The diff residue here fused the old and new versions of this function;
 * this is the resolved post-change form, which accepts an explicit env map
 * (defaulting to `process.env`) so callers can expand against per-deploy
 * override environments.
 *
 * @param {*} value - Value to expand; non-strings are returned unchanged.
 * @param {Object<string, string|undefined>} [env=process.env] - Token lookup map.
 * @returns {*} The string with every `${NAME}` replaced by `env[NAME]`
 *   (empty string when unset), or the original value when not a string.
 */
function expandEnvTokens(value, env = process.env) {
  if (typeof value !== 'string') return value;
  // Case-insensitive match (`i` flag) so `${path}` and `${PATH}` both resolve.
  return value.replace(/\$\{([A-Z0-9_]+)\}/gi, (_, key) => env[key] || '');
}
|
||||
|
||||
/**
 * Resolve the Phoenix repo root: the PHOENIX_REPO_ROOT env var wins, falling
 * back to the module-level default. The result is trimmed and stripped of a
 * trailing slash so path.join callers never double up separators.
 *
 * @returns {string} Repo root path, or '' when nothing is configured.
 */
function resolvePhoenixRepoRoot() {
  const configured = process.env.PHOENIX_REPO_ROOT || PHOENIX_REPO_ROOT_DEFAULT || '';
  return configured.trim().replace(/\/$/, '');
}
|
||||
|
||||
/**
|
||||
@@ -155,25 +166,151 @@ async function verifyHealthCheck(healthcheck) {
|
||||
throw new Error(`Health check failed for ${healthcheck.url}: ${lastError?.message || 'unknown error'}`);
|
||||
}
|
||||
|
||||
async function runDeployTarget(definition, configDefaults, context) {
|
||||
/**
 * Download a repository tarball from the Gitea archive API and write it to disk.
 *
 * @param {object} opts
 * @param {string} opts.owner - Repository owner/organization.
 * @param {string} opts.repo - Repository name.
 * @param {string} opts.ref - Branch, tag, or commit SHA to archive.
 * @param {string} opts.archivePath - Destination path for the .tar.gz file.
 * @param {string} [opts.authToken] - Optional Gitea token for private repos.
 * @throws {Error} When the archive endpoint responds with a non-2xx status.
 */
async function downloadRepoArchive({ owner, repo, ref, archivePath, authToken }) {
  const url = `${GITEA_URL}/api/v1/repos/${owner}/${repo}/archive/${ref}.tar.gz`;
  const headers = authToken ? { Authorization: `token ${authToken}` } : {};
  const response = await fetch(url, { headers });
  if (!response.ok) {
    throw new Error(`Failed to download archive ${owner}/${repo}@${ref}: HTTP ${response.status}`);
  }
  writeFileSync(archivePath, Buffer.from(await response.arrayBuffer()));
}
|
||||
|
||||
/**
 * Mirror selected top-level entries from an extracted archive into a
 * destination directory. Each entry is removed at the destination before
 * being copied so stale files from a previous sync never linger inside it.
 *
 * @param {object} opts
 * @param {string} opts.sourceRoot - Directory the archive was extracted into.
 * @param {string} opts.destRoot - Directory to sync into (created if missing).
 * @param {string[]|null} [opts.entries=null] - Entry names to sync; null syncs everything.
 */
function syncExtractedTree({ sourceRoot, destRoot, entries = null }) {
  mkdirSync(destRoot, { recursive: true });
  const names = Array.isArray(entries) ? entries : readdirSync(sourceRoot);
  for (const name of names) {
    const from = path.join(sourceRoot, name);
    // Explicitly listed entries may be absent from a given archive; skip quietly.
    if (!existsSync(from)) continue;
    const to = path.join(destRoot, name);
    rmSync(to, { recursive: true, force: true });
    cpSync(from, to, { recursive: true });
  }
}
|
||||
|
||||
/**
 * Fetch a repo archive from Gitea, extract it into a temp directory, and sync
 * its contents into destRoot. The temp directory is always removed, even when
 * the download or extraction fails.
 *
 * @param {object} opts
 * @param {string} opts.owner - Repository owner/organization.
 * @param {string} opts.repo - Repository name.
 * @param {string} opts.ref - Branch, tag, or commit SHA.
 * @param {string} opts.destRoot - Directory to sync the extracted tree into.
 * @param {string[]|null} [opts.entries=null] - Top-level entries to sync; null syncs all.
 * @param {string} [opts.authToken=''] - Optional Gitea token.
 * @throws {Error} When the download fails or the archive contains no entries.
 */
async function syncRepoArchive({ owner, repo, ref, destRoot, entries = null, authToken = '' }) {
  const tempDir = mkdtempSync('/tmp/phoenix-archive-');
  const archivePath = path.join(tempDir, 'repo.tar.gz');
  const extractDir = path.join(tempDir, 'extract');
  mkdirSync(extractDir, { recursive: true });
  try {
    await downloadRepoArchive({ owner, repo, ref, archivePath, authToken });
    await execFile('tar', ['-xzf', archivePath, '-C', extractDir]);
    // Gitea archives wrap everything in a single "<repo>/" directory.
    const [wrapperDir] = readdirSync(extractDir);
    if (!wrapperDir) {
      throw new Error(`Archive for ${owner}/${repo}@${ref} was empty`);
    }
    syncExtractedTree({
      sourceRoot: path.join(extractDir, wrapperDir),
      destRoot,
      entries,
    });
  } finally {
    rmSync(tempDir, { recursive: true, force: true });
  }
}
|
||||
|
||||
/**
 * Stage the deploy workspace for a deploy request and return the env
 * overrides the deploy command should run with.
 *
 * For `d-bis/proxmox` a curated subset of the repo is synced directly into
 * the resolved repo root; any other repo is staged under
 * `<repoRoot>/.phoenix-deploy-workspaces/<owner>/<repo>`. When `sha` is
 * 'HEAD' or 'local' no archive is fetched — the already-staged workspace is
 * reused (manual smoke-test path). For the `atomic-swap-dapp-live` target,
 * the atomic-swap dapp plus the optional PMM-LPS and SMOM companion repos
 * are synced alongside the repo root as well.
 *
 * @param {object} opts
 * @param {string} opts.repo - 'owner/name' slug (bare names default to the d-bis org).
 * @param {string} opts.branch - Branch used as the ref fallback when sha is empty.
 * @param {string} opts.sha - Commit SHA, or 'HEAD'/'local' to skip archive syncing.
 * @param {string} opts.target - Deploy target name.
 * @returns {Promise<object>} Env overrides: PHOENIX_REPO_ROOT, PROXMOX_REPO_ROOT,
 *   and PHOENIX_DEPLOY_WORKSPACE.
 * @throws {Error} When PHOENIX_REPO_ROOT cannot be resolved.
 */
async function prepareDeployWorkspace({ repo, branch, sha, target }) {
  const repoRoot = resolvePhoenixRepoRoot();
  if (!repoRoot) {
    throw new Error('PHOENIX_REPO_ROOT is not configured');
  }

  const splitSlug = (slug) => (slug.includes('/') ? slug.split('/') : ['d-bis', slug]);
  const [owner, repoName] = splitSlug(repo);
  const isProxmoxRepo = repo === 'd-bis/proxmox';
  const externalWorkspaceRoot = path.join(repoRoot, '.phoenix-deploy-workspaces', owner, repoName);
  const envOverrides = {
    PHOENIX_REPO_ROOT: repoRoot,
    PROXMOX_REPO_ROOT: repoRoot,
    PHOENIX_DEPLOY_WORKSPACE: isProxmoxRepo ? repoRoot : externalWorkspaceRoot,
  };

  // Manual smoke tests can target the already-staged local workspace without
  // forcing an archive sync from Gitea.
  if (sha === 'HEAD' || sha === 'local') {
    mkdirSync(repoRoot, { recursive: true });
    if (!isProxmoxRepo) {
      mkdirSync(externalWorkspaceRoot, { recursive: true });
    }
    return envOverrides;
  }

  const ref = sha || branch || 'main';

  if (isProxmoxRepo) {
    // Only the deploy-relevant subtrees are mirrored into the live repo root,
    // so local-only files elsewhere in the checkout survive a sync.
    await syncRepoArchive({
      owner,
      repo: repoName,
      ref,
      destRoot: repoRoot,
      entries: ['config', 'phoenix-deploy-api', 'reports', 'scripts', 'token-lists'],
      authToken: GITEA_TOKEN,
    });
  } else {
    await syncRepoArchive({
      owner,
      repo: repoName,
      ref,
      destRoot: externalWorkspaceRoot,
      authToken: GITEA_TOKEN,
    });
  }

  if (isProxmoxRepo && target === 'atomic-swap-dapp-live') {
    // Companion repos are staged sequentially next to the repo root. The
    // PMM-LPS and SMOM repos are optional (empty slug means unconfigured);
    // ATOMIC_SWAP_REPO always has a non-empty default.
    const companions = [
      { slug: ATOMIC_SWAP_REPO, ref: ATOMIC_SWAP_REF, dir: 'atomic-swap-dapp' },
      { slug: CROSS_CHAIN_PMM_LPS_REPO, ref: CROSS_CHAIN_PMM_LPS_REF, dir: 'cross-chain-pmm-lps' },
      { slug: SMOM_DBIS_138_REPO, ref: SMOM_DBIS_138_REF, dir: 'smom-dbis-138' },
    ];
    for (const companion of companions) {
      if (!companion.slug) continue;
      const [companionOwner, companionRepo] = splitSlug(companion.slug);
      await syncRepoArchive({
        owner: companionOwner,
        repo: companionRepo,
        ref: companion.ref,
        destRoot: path.join(repoRoot, companion.dir),
        authToken: GITEA_TOKEN,
      });
    }
  }

  return envOverrides;
}
|
||||
|
||||
async function runDeployTarget(definition, configDefaults, context, envOverrides = {}) {
|
||||
if (!Array.isArray(definition.command) || definition.command.length === 0) {
|
||||
throw new Error('Deploy target is missing a command array');
|
||||
}
|
||||
|
||||
const cwd = expandEnvTokens(definition.cwd || configDefaults.cwd || process.cwd());
|
||||
const timeoutSeconds = Number(definition.timeout_sec || configDefaults.timeout_sec || 1800);
|
||||
const timeout = Number.isFinite(timeoutSeconds) && timeoutSeconds > 0 ? timeoutSeconds * 1000 : 1800 * 1000;
|
||||
const command = definition.command.map((part) => expandEnvTokens(part));
|
||||
const missingEnv = (definition.required_env || []).filter((key) => !process.env[key]);
|
||||
if (missingEnv.length > 0) {
|
||||
throw new Error(`Missing required env for deploy target: ${missingEnv.join(', ')}`);
|
||||
}
|
||||
if (!existsSync(cwd)) {
|
||||
throw new Error(`Deploy working directory does not exist: ${cwd}`);
|
||||
}
|
||||
|
||||
const childEnv = {
|
||||
...process.env,
|
||||
...envOverrides,
|
||||
PHOENIX_DEPLOY_REPO: context.repo,
|
||||
PHOENIX_DEPLOY_BRANCH: context.branch,
|
||||
PHOENIX_DEPLOY_SHA: context.sha || '',
|
||||
@@ -181,6 +318,18 @@ async function runDeployTarget(definition, configDefaults, context) {
|
||||
PHOENIX_DEPLOY_TRIGGER: context.trigger,
|
||||
};
|
||||
|
||||
const cwd = expandEnvTokens(definition.cwd || configDefaults.cwd || process.cwd(), childEnv);
|
||||
const timeoutSeconds = Number(definition.timeout_sec || configDefaults.timeout_sec || 1800);
|
||||
const timeout = Number.isFinite(timeoutSeconds) && timeoutSeconds > 0 ? timeoutSeconds * 1000 : 1800 * 1000;
|
||||
const command = definition.command.map((part) => expandEnvTokens(part, childEnv));
|
||||
const missingEnv = (definition.required_env || []).filter((key) => !childEnv[key]);
|
||||
if (missingEnv.length > 0) {
|
||||
throw new Error(`Missing required env for deploy target: ${missingEnv.join(', ')}`);
|
||||
}
|
||||
if (!existsSync(cwd)) {
|
||||
throw new Error(`Deploy working directory does not exist: ${cwd}`);
|
||||
}
|
||||
|
||||
const { stdout, stderr } = await execFile(command[0], command.slice(1), {
|
||||
cwd,
|
||||
env: childEnv,
|
||||
@@ -237,15 +386,22 @@ async function executeDeploy({ repo, branch = 'main', target = 'default', sha =
|
||||
|
||||
let deployResult = null;
|
||||
let deployError = null;
|
||||
let envOverrides = {};
|
||||
|
||||
try {
|
||||
envOverrides = await prepareDeployWorkspace({
|
||||
repo,
|
||||
branch,
|
||||
sha: commitSha,
|
||||
target: wantedTarget,
|
||||
});
|
||||
deployResult = await runDeployTarget(match, config.defaults, {
|
||||
repo,
|
||||
branch,
|
||||
sha: commitSha,
|
||||
target: wantedTarget,
|
||||
trigger,
|
||||
});
|
||||
}, envOverrides);
|
||||
if (commitSha && GITEA_TOKEN) {
|
||||
await setGiteaCommitStatus(owner, repoName, commitSha, 'success', `Deployed to ${wantedTarget}`);
|
||||
}
|
||||
@@ -286,6 +442,7 @@ async function executeDeploy({ repo, branch = 'main', target = 'default', sha =
|
||||
success: Boolean(deployResult),
|
||||
command: deployResult?.command,
|
||||
cwd: deployResult?.cwd,
|
||||
phoenix_repo_root: envOverrides.PHOENIX_REPO_ROOT || null,
|
||||
error: deployError?.message || null,
|
||||
};
|
||||
const body = JSON.stringify(payload);
|
||||
|
||||
@@ -18,5 +18,9 @@ RPC_URL_138=https://rpc-http-pub.d-bis.org
|
||||
# Optional: Chain 651940 (Alltra) RPC when X402_USE_ALLTRA=true
|
||||
CHAIN_651940_RPC_URL=https://mainnet-rpc.alltra.global
|
||||
|
||||
# Optional: canonical public origin for x402 resourceUrl generation behind nginx / reverse proxy
|
||||
# Example: https://api.example.com
|
||||
PUBLIC_BASE_URL=
|
||||
|
||||
# Optional: server port (default 4020)
|
||||
PORT=4020
|
||||
|
||||
@@ -30,6 +30,8 @@ npm run dev
|
||||
|
||||
Clients must send payment authorization in the `PAYMENT-SIGNATURE` or `X-PAYMENT` header (e.g. using thirdweb’s `useFetchWithPayment` or equivalent).
|
||||
|
||||
If you deploy behind nginx, HAProxy, or another reverse proxy, set `PUBLIC_BASE_URL` so the server settles against the same absolute `resourceUrl` your client is paying for. This mirrors the explicit `resourceUrl: "https://api.example.com/premium-content"` style from thirdweb’s examples.
|
||||
|
||||
## Chain and token support
|
||||
|
||||
x402 requires the payment token to support **ERC-2612 permit** or **ERC-3009** when using thirdweb facilitator. For **Alltra (651940)** we use **local verification** (no facilitator): server returns 402 + `PAYMENT-REQUIRED`, client pays USDC on 651940 and retries with `PAYMENT-SIGNATURE` + `txHash`; server verifies settlement on-chain. See [X402_ALLTRA_ENDPOINT_SPEC.md](../docs/04-configuration/X402_ALLTRA_ENDPOINT_SPEC.md).
|
||||
@@ -54,6 +56,7 @@ Verification script for token support:
|
||||
| `X402_USE_CHAIN_138` | No | `true` to use Chain 138 (default `false`) |
|
||||
| `RPC_URL_138` | No | Chain 138 RPC when using Chain 138 (default public RPC) |
|
||||
| `CHAIN_651940_RPC_URL` | No | Alltra RPC when `X402_USE_ALLTRA=true` (default mainnet-rpc.alltra.global) |
|
||||
| `PUBLIC_BASE_URL` | No | Canonical public origin used to build x402 `resourceUrl` behind a reverse proxy |
|
||||
| `PORT` | No | Server port (default `4020`) |
|
||||
|
||||
## References
|
||||
|
||||
@@ -12,6 +12,7 @@ import { randomUUID } from "crypto";
|
||||
|
||||
const app = express();
|
||||
app.use(express.json());
|
||||
app.set("trust proxy", true);
|
||||
|
||||
const PORT = process.env.PORT || 4020;
|
||||
const secretKey = process.env.THIRDWEB_SECRET_KEY;
|
||||
@@ -20,6 +21,7 @@ const useChain138 = process.env.X402_USE_CHAIN_138 === "true";
|
||||
const useAlltra = process.env.X402_USE_ALLTRA === "true";
|
||||
const rpcUrl138 = process.env.RPC_URL_138 || "https://rpc-http-pub.d-bis.org";
|
||||
const rpcUrl651940 = process.env.CHAIN_651940_RPC_URL || process.env.RPC_URL_651940 || "https://mainnet-rpc.alltra.global";
|
||||
const publicBaseUrl = process.env.PUBLIC_BASE_URL?.trim().replace(/\/+$/, "") || "";
|
||||
|
||||
/** Custom Chain 138 for thirdweb (DeFi Oracle Meta Mainnet) */
|
||||
const chain138 = defineChain({
|
||||
@@ -96,6 +98,29 @@ function markReplayConsumed(payer, resourceId, nonce) {
|
||||
replayStore.set(replayKey(payer, resourceId, nonce), Date.now() + REPLAY_TTL_MS);
|
||||
}
|
||||
|
||||
/**
 * Pull the raw x402 payment payload from the request headers.
 * Checks both the `PAYMENT-SIGNATURE` and `X-PAYMENT` spellings; the
 * uppercase lookups mirror the original header-name variants.
 *
 * @param {import('express').Request} req
 * @returns {string|undefined} The first non-empty header value, or undefined.
 */
function getPaymentData(req) {
  const candidateNames = ['payment-signature', 'PAYMENT-SIGNATURE', 'x-payment', 'X-PAYMENT'];
  for (const name of candidateNames) {
    const value = req.headers[name];
    if (value) return value;
  }
  return undefined;
}
|
||||
|
||||
/**
 * Build the absolute resource URL the x402 payment is settled against.
 * The configured PUBLIC_BASE_URL wins when set; otherwise the origin is
 * reconstructed from reverse-proxy forwarding headers, falling back to the
 * request's own protocol and Host header.
 *
 * @param {import('express').Request} req
 * @returns {string} Absolute URL, or '' when no host can be determined.
 */
function buildResourceUrl(req) {
  const requestPath = req.originalUrl || req.url;
  if (publicBaseUrl) {
    return `${publicBaseUrl}${requestPath}`;
  }
  const proto = req.get('x-forwarded-proto') || req.protocol || 'http';
  const host = req.get('x-forwarded-host') || req.get('host');
  return host ? `${proto}://${host}${requestPath}` : '';
}
|
||||
|
||||
/** Price: Alltra USDC, Chain 138 cUSDC, or Arbitrum Sepolia default. */
|
||||
function getPrice() {
|
||||
if (useAlltra) {
|
||||
@@ -156,11 +181,7 @@ async function handlePaidRouteAlltra(req, res) {
|
||||
}
|
||||
|
||||
const resourceId = `${req.method} ${req.originalUrl || req.url}`;
|
||||
const paymentData =
|
||||
req.headers["payment-signature"] ||
|
||||
req.headers["PAYMENT-SIGNATURE"] ||
|
||||
req.headers["x-payment"] ||
|
||||
req.headers["X-PAYMENT"];
|
||||
const paymentData = getPaymentData(req);
|
||||
|
||||
if (!paymentData || paymentData.trim() === "") {
|
||||
const paymentRequired = buildPaymentRequired(resourceId);
|
||||
@@ -212,11 +233,7 @@ async function handlePaidRoute(req, res) {
|
||||
return handlePaidRouteAlltra(req, res);
|
||||
}
|
||||
|
||||
const paymentData =
|
||||
req.headers["payment-signature"] ||
|
||||
req.headers["PAYMENT-SIGNATURE"] ||
|
||||
req.headers["x-payment"] ||
|
||||
req.headers["X-PAYMENT"];
|
||||
const paymentData = getPaymentData(req);
|
||||
|
||||
if (!thirdwebFacilitator || !serverWalletAddress) {
|
||||
return res.status(503).json({
|
||||
@@ -225,8 +242,7 @@ async function handlePaidRoute(req, res) {
|
||||
});
|
||||
}
|
||||
|
||||
const resourceUrl =
|
||||
(req.protocol + "://" + req.get("host") + req.originalUrl) || "";
|
||||
const resourceUrl = buildResourceUrl(req);
|
||||
const method = req.method;
|
||||
|
||||
const result = await settlePayment({
|
||||
|
||||
Reference in New Issue
Block a user