Compare commits


3 Commits

Author SHA1 Message Date
defiQUG
abfca65342 fix(ops): map dev VM 5700 to r630-04; add phoenix-deploy-api deploy script
Place VMID 5700 (dev-vm) on PROXMOX_HOST_R630_04 per live cluster placement.
Add LAN deploy helper that bundles phoenix-deploy-api + public-sector manifest,
pushes to PVE, and runs install-systemd on CT 5700.

Made-with: Cursor
2026-04-17 04:48:17 -07:00
defiQUG
8511bf092b Publish Chain 138 route and flash verification recovery 2026-04-16 13:49:46 -07:00
defiQUG
dc21a3f302 Ignore local runtime artifacts 2026-04-16 11:24:09 -07:00
26 changed files with 1740 additions and 1995 deletions

View File

@@ -6,10 +6,6 @@
2. Make changes, ensure tests pass
3. Open a pull request
Deploy workflow policy:
`main` and `master` are both deploy-triggering branches, so `.gitea/workflow-sources/deploy-to-phoenix.yml` and `.gitea/workflow-sources/validate-on-pr.yml` must stay identical across both branches.
Use `bash scripts/verify/sync-gitea-workflows.sh` after editing workflow-source files, and `bash scripts/verify/run-all-validation.sh --skip-genesis` to catch workflow drift before push.
## Pull Requests
- Use the PR template when opening a PR

View File

@@ -1,125 +0,0 @@
# Canonical deploy workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
name: Deploy to Phoenix
on:
push:
branches: [main, master]
workflow_dispatch:
jobs:
validate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Fetch deploy branches for workflow parity check
run: |
REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
if git remote | grep -qx gitea; then
REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
fi
git fetch --depth=1 "$REMOTE" main master
- name: Install validation dependencies
run: |
corepack enable
pnpm install --frozen-lockfile
# The cW* mesh matrix and deployment-status validators read
# cross-chain-pmm-lps/config/*.json. The parent checkout does not
# materialize submodules by default, and .gitmodules mixes public HTTPS
# with SSH URLs, so clone only the required public validation dependency.
- name: Materialize cross-chain-pmm-lps
run: |
set -euo pipefail
if [ ! -f cross-chain-pmm-lps/config/deployment-status.json ]; then
rm -rf cross-chain-pmm-lps
git clone --depth=1 \
https://gitea.d-bis.org/d-bis/cross-chain-pmm-lps.git \
cross-chain-pmm-lps
fi
- name: Run repo validation gate
run: |
bash scripts/verify/run-all-validation.sh --skip-genesis
deploy:
needs: validate
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Trigger Phoenix deployment
run: |
set -euo pipefail
SHA="$(git rev-parse HEAD)"
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
set +e
curl -sSf --retry 3 --retry-connrefused --retry-delay 10 --retry-max-time 180 \
--connect-timeout 10 --max-time 120 \
-X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
-H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
-H "Content-Type: application/json" \
-d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"default\"}"
rc="$?"
set -e
if [ "$rc" -eq 52 ]; then
HEALTH_URL="${{ secrets.PHOENIX_DEPLOY_URL }}"
HEALTH_URL="${HEALTH_URL%/api/deploy}/health"
echo "Phoenix deploy API restarted during self-deploy; verifying ${HEALTH_URL}"
for i in $(seq 1 12); do
if curl -fsS --max-time 5 "$HEALTH_URL"; then
exit 0
fi
sleep 5
done
fi
exit "$rc"
deploy-atomic-swap-dapp:
needs: deploy
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Trigger Atomic Swap dApp deployment (Phoenix)
run: |
set -euo pipefail
SHA="$(git rev-parse HEAD)"
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
curl -sSf \
--connect-timeout 10 --max-time 900 \
-X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
-H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
-H "Content-Type: application/json" \
-d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"atomic-swap-dapp-live\"}"
# After app deploy, ask Phoenix to run path-gated Cloudflare DNS sync on the host that has
# PHOENIX_REPO_ROOT + .env (not on this runner). Skips unless PHOENIX_CLOUDFLARE_SYNC=1 on that host.
# continue-on-error: first-time or missing opt-in should not block the main deploy.
cloudflare:
needs:
- deploy
- deploy-atomic-swap-dapp
runs-on: ubuntu-latest
continue-on-error: true
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Request Cloudflare DNS sync (Phoenix)
run: |
set -euo pipefail
SHA="$(git rev-parse HEAD)"
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
curl -sSf --retry 5 --retry-all-errors --retry-connrefused --retry-delay 10 --retry-max-time 300 \
--connect-timeout 10 --max-time 120 \
-X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
-H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
-H "Content-Type: application/json" \
-d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"cloudflare-sync\"}" \
|| { echo "Cloudflare DNS sync request failed; optional sync is non-blocking."; exit 0; }

View File

@@ -1,33 +0,0 @@
# Canonical PR validation workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
# PR-only: push validation already runs in deploy-to-phoenix.yml; this gives PRs the same
# no-LAN checks without the deploy job (and without deploy secrets).
name: Validate (PR)
on:
pull_request:
types: [opened, synchronize, reopened]
branches: [main, master]
workflow_dispatch:
jobs:
run-all-validation:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Fetch deploy branches for workflow parity check
run: |
REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
if git remote | grep -qx gitea; then
REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
fi
git fetch --depth=1 "$REMOTE" main master
- name: Install validation dependencies
run: |
corepack enable
pnpm install --frozen-lockfile
# Optional: set org/repo variable URA_STRICT_CLOSURE=1 to fail PRs while pilot placeholders
# remain in manifest (see scripts/ura/validate-manifest-closure.mjs). Not enabled by default.
- name: run-all-validation (no LAN, no genesis)
env:
URA_STRICT_CLOSURE: ${{ vars.URA_STRICT_CLOSURE }}
run: bash scripts/verify/run-all-validation.sh --skip-genesis

View File

@@ -1,52 +1,11 @@
# Canonical deploy workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
name: Deploy to Phoenix
on:
push:
branches: [main, master]
workflow_dispatch:
jobs:
validate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Fetch deploy branches for workflow parity check
run: |
REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
if git remote | grep -qx gitea; then
REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
fi
git fetch --depth=1 "$REMOTE" main master
- name: Install validation dependencies
run: |
corepack enable
pnpm install --frozen-lockfile
# The cW* mesh matrix and deployment-status validators read
# cross-chain-pmm-lps/config/*.json. The parent checkout does not
# materialize submodules by default, and .gitmodules mixes public HTTPS
# with SSH URLs, so clone only the required public validation dependency.
- name: Materialize cross-chain-pmm-lps
run: |
set -euo pipefail
if [ ! -f cross-chain-pmm-lps/config/deployment-status.json ]; then
rm -rf cross-chain-pmm-lps
git clone --depth=1 \
https://gitea.d-bis.org/d-bis/cross-chain-pmm-lps.git \
cross-chain-pmm-lps
fi
- name: Run repo validation gate
run: |
bash scripts/verify/run-all-validation.sh --skip-genesis
deploy:
needs: validate
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -54,72 +13,8 @@ jobs:
- name: Trigger Phoenix deployment
run: |
set -euo pipefail
SHA="$(git rev-parse HEAD)"
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
set +e
curl -sSf --retry 3 --retry-connrefused --retry-delay 10 --retry-max-time 180 \
--connect-timeout 10 --max-time 120 \
-X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
curl -sSf -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
-H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
-H "Content-Type: application/json" \
-d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"default\"}"
rc="$?"
set -e
if [ "$rc" -eq 52 ]; then
HEALTH_URL="${{ secrets.PHOENIX_DEPLOY_URL }}"
HEALTH_URL="${HEALTH_URL%/api/deploy}/health"
echo "Phoenix deploy API restarted during self-deploy; verifying ${HEALTH_URL}"
for i in $(seq 1 12); do
if curl -fsS --max-time 5 "$HEALTH_URL"; then
exit 0
fi
sleep 5
done
fi
exit "$rc"
deploy-atomic-swap-dapp:
needs: deploy
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Trigger Atomic Swap dApp deployment (Phoenix)
run: |
set -euo pipefail
SHA="$(git rev-parse HEAD)"
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
curl -sSf \
--connect-timeout 10 --max-time 900 \
-X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
-H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
-H "Content-Type: application/json" \
-d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"atomic-swap-dapp-live\"}"
# After app deploy, ask Phoenix to run path-gated Cloudflare DNS sync on the host that has
# PHOENIX_REPO_ROOT + .env (not on this runner). Skips unless PHOENIX_CLOUDFLARE_SYNC=1 on that host.
# continue-on-error: first-time or missing opt-in should not block the main deploy.
cloudflare:
needs:
- deploy
- deploy-atomic-swap-dapp
runs-on: ubuntu-latest
continue-on-error: true
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Request Cloudflare DNS sync (Phoenix)
run: |
set -euo pipefail
SHA="$(git rev-parse HEAD)"
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
curl -sSf --retry 5 --retry-all-errors --retry-connrefused --retry-delay 10 --retry-max-time 300 \
--connect-timeout 10 --max-time 120 \
-X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
-H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
-H "Content-Type: application/json" \
-d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"cloudflare-sync\"}" \
|| { echo "Cloudflare DNS sync request failed; optional sync is non-blocking."; exit 0; }
-d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${{ gitea.sha }}\",\"branch\":\"${{ gitea.ref_name }}\"}"
continue-on-error: true

View File

@@ -1,33 +0,0 @@
# Canonical PR validation workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
# PR-only: push validation already runs in deploy-to-phoenix.yml; this gives PRs the same
# no-LAN checks without the deploy job (and without deploy secrets).
name: Validate (PR)
on:
pull_request:
types: [opened, synchronize, reopened]
branches: [main, master]
workflow_dispatch:
jobs:
run-all-validation:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Fetch deploy branches for workflow parity check
run: |
REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
if git remote | grep -qx gitea; then
REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
fi
git fetch --depth=1 "$REMOTE" main master
- name: Install validation dependencies
run: |
corepack enable
pnpm install --frozen-lockfile
# Optional: set org/repo variable URA_STRICT_CLOSURE=1 to fail PRs while pilot placeholders
# remain in manifest (see scripts/ura/validate-manifest-closure.mjs). Not enabled by default.
- name: run-all-validation (no LAN, no genesis)
env:
URA_STRICT_CLOSURE: ${{ vars.URA_STRICT_CLOSURE }}
run: bash scripts/verify/run-all-validation.sh --skip-genesis

.gitignore (vendored, 5 changed lines)
View File

@@ -68,6 +68,7 @@ out/
# Temporary files
*.tmp
*.temp
.tmp-*.cjs
# Environment backup files (Security: Prevent committing backup files with secrets)
*.env.backup
@@ -97,6 +98,10 @@ reports/status/live_inventory_*.json
reports/status/hardware_poll_*.txt
reports/status/lxc_cluster_health_*.json
reports/status/lxc_cluster_health_*.txt
reports/status/*runtime-env*.env
reports/status/*operator-rpcs*.env
reports/status/*_runtime.env
reports/status/*.tar.gz
# Wormhole AI docs mirror (sync with scripts/doc/sync-wormhole-ai-resources.sh; keep manifest.json committable)
third-party/wormhole-ai-docs/**

View File

@@ -2076,10 +2076,10 @@
"baseSymbol": "cWETH",
"quoteSymbol": "USDC",
"poolAddress": "0xd012000000000000000000000000000000000001",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_mainnet",
"venue": "dodo_pmm",
@@ -2091,10 +2091,10 @@
"baseSymbol": "cWETH",
"quoteSymbol": "WETH",
"poolAddress": "0xd011000000000000000000000000000000000001",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_mainnet",
"venue": "dodo_pmm",
@@ -2150,10 +2150,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "USDC",
"poolAddress": "0xd02200000000000000000000000000000000000a",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2165,10 +2165,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "WETH",
"poolAddress": "0xd02100000000000000000000000000000000000a",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2246,10 +2246,10 @@
"baseSymbol": "cWXDAI",
"quoteSymbol": "USDC",
"poolAddress": "0xd072000000000000000000000000000000000064",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "xdai",
"venue": "dodo_pmm",
@@ -2261,10 +2261,10 @@
"baseSymbol": "cWXDAI",
"quoteSymbol": "WXDAI",
"poolAddress": "0xd071000000000000000000000000000000000064",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "xdai",
"venue": "dodo_pmm",
@@ -2276,10 +2276,10 @@
"baseSymbol": "cWWEMIX",
"quoteSymbol": "USDC",
"poolAddress": "0xd092000000000000000000000000000000000457",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "wemix",
"venue": "dodo_pmm",
@@ -2291,10 +2291,10 @@
"baseSymbol": "cWWEMIX",
"quoteSymbol": "WWEMIX",
"poolAddress": "0xd091000000000000000000000000000000000457",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "wemix",
"venue": "dodo_pmm",
@@ -2339,10 +2339,10 @@
"baseSymbol": "cWPOL",
"quoteSymbol": "USDC",
"poolAddress": "0xd042000000000000000000000000000000000089",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "pol",
"venue": "dodo_pmm",
@@ -2354,10 +2354,10 @@
"baseSymbol": "cWPOL",
"quoteSymbol": "WPOL",
"poolAddress": "0xd041000000000000000000000000000000000089",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "pol",
"venue": "dodo_pmm",
@@ -2413,10 +2413,10 @@
"baseSymbol": "cWCRO",
"quoteSymbol": "USDT",
"poolAddress": "0xd062000000000000000000000000000000000019",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "cro",
"venue": "dodo_pmm",
@@ -2428,10 +2428,10 @@
"baseSymbol": "cWCRO",
"quoteSymbol": "WCRO",
"poolAddress": "0xd061000000000000000000000000000000000019",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "cro",
"venue": "dodo_pmm",
@@ -2487,10 +2487,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "USDC",
"poolAddress": "0xd02200000000000000000000000000000000a4b1",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2502,10 +2502,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "WETH",
"poolAddress": "0xd02100000000000000000000000000000000a4b1",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2572,10 +2572,10 @@
"baseSymbol": "cWCELO",
"quoteSymbol": "USDC",
"poolAddress": "0xd08200000000000000000000000000000000a4ec",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "celo",
"venue": "dodo_pmm",
@@ -2587,10 +2587,10 @@
"baseSymbol": "cWCELO",
"quoteSymbol": "WCELO",
"poolAddress": "0xd08100000000000000000000000000000000a4ec",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "celo",
"venue": "dodo_pmm",
@@ -2635,10 +2635,10 @@
"baseSymbol": "cWAVAX",
"quoteSymbol": "USDC",
"poolAddress": "0xd05200000000000000000000000000000000a86a",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "avax",
"venue": "dodo_pmm",
@@ -2650,10 +2650,10 @@
"baseSymbol": "cWAVAX",
"quoteSymbol": "WAVAX",
"poolAddress": "0xd05100000000000000000000000000000000a86a",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "avax",
"venue": "dodo_pmm",
@@ -2720,10 +2720,10 @@
"baseSymbol": "cWBNB",
"quoteSymbol": "USDT",
"poolAddress": "0xd032000000000000000000000000000000000038",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "bnb",
"venue": "dodo_pmm",
@@ -2735,10 +2735,10 @@
"baseSymbol": "cWBNB",
"quoteSymbol": "WBNB",
"poolAddress": "0xd031000000000000000000000000000000000038",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "bnb",
"venue": "dodo_pmm",
@@ -2816,10 +2816,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "USDC",
"poolAddress": "0xd022000000000000000000000000000000002105",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2831,10 +2831,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "WETH",
"poolAddress": "0xd021000000000000000000000000000000002105",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",

View File

@@ -1936,7 +1936,7 @@
"key": "Compliant_WEMIX_cW",
"name": "cWEMIX->cWWEMIX",
"addressFrom": "0x4d82206bec5b4dfa17759ffede07e35f4f63a050",
"addressTo": "0x4c38f9a5ed68a04cd28a72e8c68c459ec34576f3",
"addressTo": "0xc111000000000000000000000000000000000457",
"notes": "Wave 1 gas-family lane wemix: Chain 138 cWEMIX -> Wemix cWWEMIX. hybrid_cap backing with uniswap_v3 reference pricing and DODO PMM edge liquidity."
}
]

View File

@@ -0,0 +1,149 @@
## Chain 138 Blockscout Route and Flash Lineage Report
Date: 2026-04-16
### Summary
- The Chain `138` Blockscout publication set is now fully closed.
- `dodo_v3_core`, `flash_infra`, `native_v2`, and `route_execution_stack` are all `verified` on the public explorer.
- The historical route and flash deployments were not produced by the current local default build outputs; rebuilding with today's defaults does not reproduce the deployed bytecode.
- The exact recovered historical source/build profile for the deployed route and flash families is:
- source family rooted at commit `6817f53`
- compiler `solc 0.8.20`
- `evm_version = london`
- `optimizer_runs = 200`
- `via_ir = false`
### Important provenance note
The Foundry broadcast files for the route and flash deployments record:
- `commit: 7678218`
That field is not the true source commit for the deployed route and flash contract families.
The exact source lineage was recovered by:
1. tracing the relevant contract families through `git log`
2. testing historical worktrees directly
3. comparing candidate creation bytecode plus ABI-encoded constructor args against the original broadcast `transaction.input` (see the sketch below)
Recovered deployment family:
- `6817f53`
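A minimal sketch of the step-3 comparison, assuming a standard Foundry layout with `jq` available; the `forge build` flags mirror the recovered profile, and the broadcast field name (`input` vs. `data`) varies by Foundry version:
```bash
# Sketch: test whether a candidate historical build reproduces the recorded
# deployment input for EnhancedSwapRouterV2 (assumes Foundry + jq; the first
# broadcast transaction is assumed to be the deploy).
BROADCAST=smom-dbis-138/broadcast/DeployEnhancedSwapRouterV2.s.sol/138/run-latest.json
RECORDED="$(jq -r '.transactions[0].transaction.input // .transactions[0].transaction.data' "$BROADCAST")"
# Materialize the suspect commit and build it with the recovered profile.
git worktree add /tmp/candidate 6817f53
(cd /tmp/candidate && forge build --evm-version london --optimizer-runs 200)
CANDIDATE="$(jq -r '.bytecode.object' \
  /tmp/candidate/out/EnhancedSwapRouterV2.sol/EnhancedSwapRouterV2.json)"
# The recorded input is creation bytecode followed by ABI-encoded constructor
# args, so the candidate bytecode must be a strict prefix of it.
case "$RECORDED" in
  "$CANDIDATE"*) echo "exact creation bytecode match" ;;
  *) echo "no match for this candidate" ;;
esac
```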
### Route execution lineage
Broadcast files:
- `smom-dbis-138/broadcast/DeployEnhancedSwapRouterV2.s.sol/138/run-latest.json`
- `smom-dbis-138/broadcast/DeployEnhancedSwapRouterV2.s.sol/138/run-1775195187069.json`
Recovered historical build/profile:
- source root: `6817f53`
- compiler: `0.8.20`
- `evm_version = london`
- `optimizer_runs = 200`
- `via_ir = false`
Recovered live lineage and publication outcome:
- `EnhancedSwapRouterV2`
- address: `0xF1c93F54A5C2fc0d7766Ccb0Ad8f157DFB4C99Ce`
- create tx: `0x30e68f519243377006e93dd82823305729a1ede5f03e744e27e5d57c7b6766a7`
- exact historical creation bytecode/input match: yes
- explorer verification: `verified`
- `IntentBridgeCoordinatorV2`
- address: `0x7D0022B7e8360172fd9C0bB6778113b7Ea3674E7`
- create tx: `0x73fc5f883eda73370e3a4f0d800453e095cee07ef5f37793ed4576f47b4fa5fb`
- exact historical creation bytecode/input match: yes
- explorer verification: `verified`
- `DodoRouteExecutorAdapter`
- address: `0x88495B3dccEA93b0633390fDE71992683121Fa62`
- create tx: `0xc574dde65e90421ed1ff5600c1f9dd71a6b8afb5a1b8416b1dde38bd2961120c`
- exact historical runtime lineage match used as canary: yes
- explorer verification: `verified`
- `DodoV3RouteExecutorAdapter`
- address: `0x9Cb97adD29c52e3B81989BcA2E33D46074B530eF`
- explorer verification: `verified`
- `UniswapV3RouteExecutorAdapter`
- address: `0x960D6db4E78705f82995690548556fb2266308EA`
- explorer verification: `verified`
- `BalancerRouteExecutorAdapter`
- address: `0x4E1B71B69188Ab45021c797039b4887a4924157A`
- explorer verification: `verified`
- `CurveRouteExecutorAdapter`
- address: `0x5f0E07071c41ACcD2A1b1032D3bd49b323b9ADE6`
- explorer verification: `verified`
- `OneInchRouteExecutorAdapter`
- address: `0x8168083d29b3293F215392A49D16e7FeF4a02600`
- explorer verification: `verified`
- `Chain138PilotUniswapV3Router`
- address: `0xD164D9cCfAcf5D9F91698f296aE0cd245D964384`
- explorer verification: `verified`
- `Chain138PilotBalancerVault`
- address: `0x96423d7C1727698D8a25EbFB88131e9422d1a3C3`
- explorer verification: `verified`
- `Chain138PilotCurve3Pool`
- address: `0xE440Ec15805BE4C7BabCD17A63B8C8A08a492e0f`
- explorer verification: `verified`
- `Chain138PilotOneInchAggregationRouter`
- address: `0x500B84b1Bc6F59C1898a5Fe538eA20A758757A4F`
- explorer verification: `verified`
### Flash-infrastructure lineage
Broadcast file:
- `smom-dbis-138/broadcast/DeployCrossChainFlashInfrastructure.s.sol/138/run-latest.json`
Recovered historical build/profile:
- source root: `6817f53`
- compiler: `0.8.20`
- `evm_version = london`
- `optimizer_runs = 200`
- `via_ir = false`
Recovered live lineage and publication outcome:
- `UniversalCCIPFlashBridgeAdapter`
- address: `0xBe9e0B2d4cF6A3b2994d6f2f0904D2B165eB8ffC`
- create tx: `0x8cc4ba611a3d0a6e880f9e21f6390f67aadd6a234df1dc2828788ad775849844`
- exact historical creation bytecode/input match: yes
- explorer verification: `verified`
- `CrossChainFlashRepayReceiver`
- address: `0xD084b68cB4B1ef2cBA09CF99FB1B6552fd9b4859`
- create tx: `0xf1fc28c10956cf368ebfc3d1cdd3150caf8aceacae86964f547a91e66a801e33`
- exact historical creation bytecode/input match: yes
- explorer verification: `verified`
- `CrossChainFlashVaultCreditReceiver`
- address: `0x89F7a1fcbBe104BeE96Da4b4b6b7d3AF85f7E661`
- create tx: `0x085e1cad6e8fbe6642db420563b7b75194a88ec6649fae99cd940c2a894ec1ad`
- exact historical creation bytecode/input match: yes
- explorer verification: `verified`
### DODO publication status
The earlier Blockscout insert/materialization blocker is closed.
Published now:
- `D3Oracle`
- `D3Vault`
- `DODOApprove`
- `DODOApproveProxy`
- `D3MMFactory`
- `D3Proxy`
All six `dodo_v3_core` contracts are fully verified on the public explorer.
### Current truthful end state
- `dodo_v3_core`: `6/6 verified`
- `flash_infra`: `3/3 verified`
- `native_v2`: `4/4 verified`
- `route_execution_stack`: `12/12 verified`
The historical build/profile recovery work is complete for the deployed route contracts and flash trio, and the dedicated verifiers have now been rerun successfully against those recovered artifacts.

View File

@@ -0,0 +1,48 @@
# Chain 138 Deployed Smart Contract Verification Status
This report is generated from the canonical Chain `138` inventory in `config/smart-contracts-master.json`, on-chain bytecode checks against the Core RPC, and Blockscout smart-contract metadata from the internal explorer API.
## Summary
| Group | Total | Deployed | Verified | Bytecode only | Pending |
| --- | ---: | ---: | ---: | ---: | ---: |
| `dodo_v3_core` | 6 | 6 | 6 | 0 | 0 |
| `flash_infra` | 3 | 3 | 3 | 0 | 0 |
| `native_v2` | 4 | 4 | 4 | 0 | 0 |
| `route_execution_stack` | 12 | 12 | 12 | 0 | 0 |
## Inventory
| Group | Label | Address | Deployed | Verification | Blockscout name | Compiler |
| --- | --- | --- | --- | --- | --- | --- |
| `dodo_v3_core` | `D3Oracle` | `0xD7459aEa8bB53C83a1e90262777D730539A326F0` | yes | `verified` | `D3Oracle` | `v0.8.16+commit.07a7930e` |
| `dodo_v3_core` | `D3Vault` | `0x42b6867260Fb9eE6d09B7E0233A1fAD65D0133D1` | yes | `verified` | `D3Vault` | `v0.8.16+commit.07a7930e` |
| `dodo_v3_core` | `DODOApprove` | `0xbF8D5CB7E8F333CA686a27374Ae06F5dfd772E9E` | yes | `verified` | `DODOApprove` | `v0.8.16+commit.07a7930e` |
| `dodo_v3_core` | `DODOApproveProxy` | `0x08d764c03C42635d8ef9046752b5694243E21Fe9` | yes | `verified` | `DODOApproveProxy` | `v0.8.16+commit.07a7930e` |
| `dodo_v3_core` | `D3MMFactory` | `0x78470C7d2925B6738544E2DD4FE7c07CcA21AC31` | yes | `verified` | `D3MMFactory` | `v0.8.16+commit.07a7930e` |
| `dodo_v3_core` | `D3Proxy` | `0xc9a11abB7C63d88546Be24D58a6d95e3762cB843` | yes | `verified` | `D3Proxy` | `v0.8.16+commit.07a7930e` |
| `flash_infra` | `UniversalCCIPFlashBridgeAdapter` | `0xBe9e0B2d4cF6A3b2994d6f2f0904D2B165eB8ffC` | yes | `verified` | `UniversalCCIPFlashBridgeAdapter` | `v0.8.20+commit.a1b79de6` |
| `flash_infra` | `CrossChainFlashRepayReceiver` | `0xD084b68cB4B1ef2cBA09CF99FB1B6552fd9b4859` | yes | `verified` | `CrossChainFlashRepayReceiver` | `v0.8.20+commit.a1b79de6` |
| `flash_infra` | `CrossChainFlashVaultCreditReceiver` | `0x89F7a1fcbBe104BeE96Da4b4b6b7d3AF85f7E661` | yes | `verified` | `CrossChainFlashVaultCreditReceiver` | `v0.8.20+commit.a1b79de6` |
| `native_v2` | `UniswapV2Factory` | `0x0C30F6e67Ab3667fCc2f5CEA8e274ef1FB920279` | yes | `verified` | `UniswapV2Factory` | `v0.5.16+commit.9c3226ce` |
| `native_v2` | `UniswapV2Router` | `0x3019A7fDc76ba7F64F18d78e66842760037ee638` | yes | `verified` | `UniswapV2Router02` | `v0.6.6+commit.6c089d02` |
| `native_v2` | `SushiSwapFactory` | `0x2871207ff0d56089D70c0134d33f1291B6Fce0BE` | yes | `verified` | `UniswapV2Factory` | `v0.6.12+commit.27d51765` |
| `native_v2` | `SushiSwapRouter` | `0xB37b93D38559f53b62ab020A14919f2630a1aE34` | yes | `verified` | `UniswapV2Router02` | `v0.6.12+commit.27d51765` |
| `route_execution_stack` | `EnhancedSwapRouterV2` | `0xF1c93F54A5C2fc0d7766Ccb0Ad8f157DFB4C99Ce` | yes | `verified` | `EnhancedSwapRouterV2` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `IntentBridgeCoordinatorV2` | `0x7D0022B7e8360172fd9C0bB6778113b7Ea3674E7` | yes | `verified` | `IntentBridgeCoordinatorV2` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `DodoRouteExecutorAdapter` | `0x88495B3dccEA93b0633390fDE71992683121Fa62` | yes | `verified` | `DodoRouteExecutorAdapter` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `DodoV3RouteExecutorAdapter` | `0x9Cb97adD29c52e3B81989BcA2E33D46074B530eF` | yes | `verified` | `DodoV3RouteExecutorAdapter` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `UniswapV3RouteExecutorAdapter` | `0x960D6db4E78705f82995690548556fb2266308EA` | yes | `verified` | `UniswapV3RouteExecutorAdapter` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `BalancerRouteExecutorAdapter` | `0x4E1B71B69188Ab45021c797039b4887a4924157A` | yes | `verified` | `BalancerRouteExecutorAdapter` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `CurveRouteExecutorAdapter` | `0x5f0E07071c41ACcD2A1b1032D3bd49b323b9ADE6` | yes | `verified` | `CurveRouteExecutorAdapter` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `OneInchRouteExecutorAdapter` | `0x8168083d29b3293F215392A49D16e7FeF4a02600` | yes | `verified` | `OneInchRouteExecutorAdapter` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `PilotUniswapV3Router` | `0xD164D9cCfAcf5D9F91698f296aE0cd245D964384` | yes | `verified` | `Chain138PilotUniswapV3Router` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `PilotBalancerVault` | `0x96423d7C1727698D8a25EbFB88131e9422d1a3C3` | yes | `verified` | `Chain138PilotBalancerVault` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `PilotCurve3Pool` | `0xE440Ec15805BE4C7BabCD17A63B8C8A08a492e0f` | yes | `verified` | `Chain138PilotCurve3Pool` | `v0.8.20+commit.a1b79de6` |
| `route_execution_stack` | `PilotOneInchRouter` | `0x500B84b1Bc6F59C1898a5Fe538eA20A758757A4F` | yes | `verified` | `Chain138PilotOneInchAggregationRouter` | `v0.8.20+commit.a1b79de6` |
## Notes
- `verified` means Blockscout currently exposes both a contract name and compiler version.
- `bytecode-only` means the address is known to the explorer, but source metadata has not materialized yet.
- `pending` means the contract is deployed in the canonical inventory, but the current Blockscout API response does not yet expose verification metadata.
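For illustration, a sketch of the per-address classification, assuming the internal explorer exposes the standard Blockscout v2 API; the host below is a placeholder, not a value from this report:
```bash
# Sketch: classify one inventory address the way the notes above describe.
EXPLORER="https://internal-explorer.example"   # placeholder host
ADDR="0xF1c93F54A5C2fc0d7766Ccb0Ad8f157DFB4C99Ce"
META="$(curl -fsS "${EXPLORER}/api/v2/smart-contracts/${ADDR}" || true)"
NAME="$(jq -r '.name // empty' <<<"$META")"
COMPILER="$(jq -r '.compiler_version // empty' <<<"$META")"
if [ -n "$NAME" ] && [ -n "$COMPILER" ]; then
  echo "verified: ${NAME} (${COMPILER})"
elif [ -n "$META" ]; then
  echo "bytecode-only: address known, source metadata not materialized"
else
  echo "pending: no verification metadata exposed yet"
fi
```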

View File

@@ -1,217 +0,0 @@
# Devin → Gitea → Proxmox CI/CD
**Status:** Working baseline for this repo
**Last Updated:** 2026-04-20
## Goal
Create a repeatable path where:
1. Devin lands code in Gitea.
2. Gitea Actions validates the repo on the site-wide `act_runner`.
3. A successful workflow calls `phoenix-deploy-api`.
4. `phoenix-deploy-api` resolves the repo/branch to a deploy target and runs the matching Proxmox publish command.
5. The deploy service checks the target health URL before it reports success.
## Current baseline in this repo
The path now exists for **`d-bis/proxmox`** on **`main`** and **`master`**:
- Canonical workflow sources: [.gitea/workflow-sources/deploy-to-phoenix.yml](/home/intlc/projects/proxmox/.gitea/workflow-sources/deploy-to-phoenix.yml) and [.gitea/workflow-sources/validate-on-pr.yml](/home/intlc/projects/proxmox/.gitea/workflow-sources/validate-on-pr.yml)
- Workflow: [deploy-to-phoenix.yml](/home/intlc/projects/proxmox/.gitea/workflows/deploy-to-phoenix.yml)
- Manual app workflow: [deploy-portal-live.yml](/home/intlc/projects/proxmox/.gitea/workflows/deploy-portal-live.yml)
- Deploy service: [server.js](/home/intlc/projects/proxmox/phoenix-deploy-api/server.js)
- Target map: [deploy-targets.json](/home/intlc/projects/proxmox/phoenix-deploy-api/deploy-targets.json)
- Current live publish script: [deploy-phoenix-deploy-api-to-dev-vm.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh)
- Manual smoke trigger: [trigger-phoenix-deploy.sh](/home/intlc/projects/proxmox/scripts/dev-vm/trigger-phoenix-deploy.sh)
- Target validator: [validate-phoenix-deploy-targets.sh](/home/intlc/projects/proxmox/scripts/validation/validate-phoenix-deploy-targets.sh)
- Bootstrap helper: [bootstrap-phoenix-cicd.sh](/home/intlc/projects/proxmox/scripts/dev-vm/bootstrap-phoenix-cicd.sh)
That default target publishes the `phoenix-deploy-api` bundle to **VMID 5700** on the correct Proxmox node and starts the CT if needed.
A second target is now available:
- `portal-live` → runs [sync-sankofa-portal-7801.sh](/home/intlc/projects/proxmox/scripts/deployment/sync-sankofa-portal-7801.sh) and then checks `http://192.168.11.51:3000/`
## Workflow lockstep
Because both `main` and `master` can trigger deploys, deploy behavior is now defined from canonical source files and checked for branch parity.
- Edit only the source files under [.gitea/workflow-sources](/home/intlc/projects/proxmox/.gitea/workflow-sources)
- Sync the checked-in workflow copies with:
```bash
bash scripts/verify/sync-gitea-workflows.sh
```
- Validate source sync plus `main`/`master` parity with:
```bash
bash scripts/verify/run-all-validation.sh --skip-genesis
```
The deploy and PR workflows both fetch `origin/main` and `origin/master` before validation, so branch drift now fails CI instead of silently changing deploy behavior.
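A minimal sketch of the drift check itself, assuming nothing beyond plain git (the validation scripts may implement it differently):
```bash
# Sketch: fail fast when the two deploy branches disagree on workflow files.
git fetch --depth=1 origin main master
if ! git diff --quiet origin/main origin/master -- \
    .gitea/workflow-sources .gitea/workflows; then
  echo "workflow drift detected between main and master" >&2
  exit 1
fi
```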
## Flow
```text
Devin
-> push to Gitea
-> Gitea Actions on act_runner (5700)
-> bash scripts/verify/run-all-validation.sh --skip-genesis
-> validates deploy-targets.json structure
-> POST /api/deploy to phoenix-deploy-api
-> match repo + branch + target in deploy-targets.json
-> run deploy command
-> verify target health URL
-> update Gitea commit status success/failure
```
## Required setup
### 0. One-command bootstrap
If root `.env` already contains the needed values, use:
```bash
bash scripts/dev-vm/bootstrap-phoenix-cicd.sh --repo d-bis/proxmox
```
This runs the validation gate, deploys `phoenix-deploy-api`, and smoke-checks the service.
### 1. Runner
Bring up the site-wide Gitea runner on VMID **5700**:
```bash
bash scripts/dev-vm/bootstrap-gitea-act-runner-site-wide.sh
```
Reference: [GITEA_ACT_RUNNER_SETUP.md](GITEA_ACT_RUNNER_SETUP.md)
### 2. Deploy API service
Deploy the API to the dev VM:
```bash
./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --dry-run
./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --apply --start-ct
```
On the target VM, set at least:
```bash
PORT=4001
GITEA_URL=https://gitea.d-bis.org
GITEA_TOKEN=<token with repo status access>
PHOENIX_DEPLOY_SECRET=<shared secret>
PHOENIX_REPO_ROOT=/home/intlc/projects/proxmox
```
Optional:
```bash
DEPLOY_TARGETS_PATH=/opt/phoenix-deploy-api/deploy-targets.json
```
For the `portal-live` target, also set:
```bash
SANKOFA_PORTAL_SRC=/home/intlc/projects/Sankofa/portal
```
### 3. Gitea repo secrets
Set these in the Gitea repository that should deploy:
- `PHOENIX_DEPLOY_URL`
- `PHOENIX_DEPLOY_TOKEN`
Example:
- `PHOENIX_DEPLOY_URL=http://192.168.11.59:4001/api/deploy`
- `PHOENIX_DEPLOY_TOKEN=<same value as PHOENIX_DEPLOY_SECRET>`
For webhook signing, the bootstrap/helper path also expects:
- `PHOENIX_DEPLOY_SECRET`
- `PHOENIX_WEBHOOK_DEPLOY_ENABLED=1` only if you want webhook events themselves to execute deploys
Do not enable both repo Actions deploys and webhook deploys for the same repo unless you intentionally want duplicate deploy attempts.
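To verify the secrets end to end, the request the workflow sends can be reproduced by hand; this sketch mirrors the workflow payload, and `sha: HEAD` makes the deploy service use the already-staged local workspace instead of syncing an archive:
```bash
# Sketch: hand-send the same deploy request the Gitea workflow issues.
# PHOENIX_DEPLOY_URL / PHOENIX_DEPLOY_TOKEN are the secrets described above.
curl -sS -X POST "$PHOENIX_DEPLOY_URL" \
  -H "Authorization: Bearer $PHOENIX_DEPLOY_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"repo":"d-bis/proxmox","branch":"main","sha":"HEAD","target":"default"}'
```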
## Adding more repos or VM targets
Extend [deploy-targets.json](/home/intlc/projects/proxmox/phoenix-deploy-api/deploy-targets.json) with another entry.
Each target is keyed by:
- `repo`
- `branch`
- `target`
Each target defines:
- `cwd`
- `command`
- `required_env`
- optional `healthcheck`
- optional `timeout_sec`
Example shape:
```json
{
"repo": "d-bis/another-service",
"branch": "main",
"target": "portal-live",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": ["bash", "scripts/deployment/sync-sankofa-portal-7801.sh"],
"required_env": ["PHOENIX_REPO_ROOT"]
}
```
Use separate `target` names when the same repo can publish to different VMIDs or environments.
Target-map validation is already part of:
```bash
bash scripts/verify/run-all-validation.sh --skip-genesis
```
and can also be run directly:
```bash
bash scripts/validation/validate-phoenix-deploy-targets.sh
```
## Manual testing
Before trusting a new Gitea workflow, trigger the deploy service directly:
```bash
bash scripts/dev-vm/trigger-phoenix-deploy.sh
```
Trigger the live portal deployment target directly:
```bash
bash scripts/dev-vm/trigger-phoenix-deploy.sh d-bis/proxmox main portal-live
```
Inspect configured targets:
```bash
curl -s http://192.168.11.59:4001/api/deploy-targets | jq .
```
## Recommended next expansions
- Add a Phoenix API target for the repo that owns VMID **7800** or **8600**, depending on which deployment line is canonical.
- Add repo-specific workflows once the Sankofa source repos themselves are mirrored into Gitea Actions.
- Move secret values from ad hoc `.env` files into the final operator-managed secret source once you settle the production host for `phoenix-deploy-api`.
## Notes
- The Gitea workflow is gated by `scripts/verify/run-all-validation.sh --skip-genesis` before deploy.
- `phoenix-deploy-api` now returns `404` when no matching target exists and `500` when the deploy command fails.
- Commit status updates are written back to Gitea from the deploy service itself.
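As an illustration of that write-back, a minimal sketch against the standard Gitea commit-status endpoint; the `context` and `description` strings are placeholders, not values taken from the service:
```bash
# Sketch: the shape of the commit-status call the deploy service makes.
# SHA is the deployed commit; GITEA_URL/GITEA_TOKEN as configured on the VM.
curl -sS -X POST "$GITEA_URL/api/v1/repos/d-bis/proxmox/statuses/$SHA" \
  -H "Authorization: token $GITEA_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"state":"success","context":"phoenix-deploy","description":"Deployed to default"}'
```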

View File

@@ -1,247 +0,0 @@
{
"defaults": {
"timeout_sec": 1800
},
"targets": [
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "default",
"description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"phoenix-deploy-api/scripts/install-systemd.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"healthcheck": {
"url": "http://192.168.11.59:4001/health",
"expect_status": 200,
"expect_body_includes": "phoenix-deploy-api",
"attempts": 8,
"delay_ms": 3000,
"timeout_ms": 10000
}
},
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "cloudflare-sync",
"description": "Optional: sync Cloudflare DNS from repo .env (path-gated; set PHOENIX_CLOUDFLARE_SYNC=1 on host).",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/gitea-cloudflare-sync.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"timeout_sec": 600
},
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "cloudflare-sync-force",
"description": "Same as cloudflare-sync but skips path filter (operator / manual).",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/gitea-cloudflare-sync.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"timeout_sec": 600
},
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "portal-live",
"description": "Deploy the Sankofa portal to CT 7801 on Proxmox.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/sync-sankofa-portal-7801.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT",
"SANKOFA_PORTAL_SRC"
],
"healthcheck": {
"url": "http://192.168.11.51:3000/",
"expect_status": 200,
"expect_body_includes": "<html",
"attempts": 10,
"delay_ms": 5000,
"timeout_ms": 10000
}
},
{
"repo": "d-bis/CurrenciCombo",
"branch": "main",
"target": "default",
"description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT",
"PHOENIX_DEPLOY_WORKSPACE"
],
"healthcheck": {
"url": "https://curucombo.xn--vov0g.com/api/ready",
"expect_status": 200,
"expect_body_includes": "\"ready\":true",
"attempts": 12,
"delay_ms": 5000,
"timeout_ms": 15000
}
},
{
"repo": "d-bis/proxmox",
"branch": "main",
"target": "atomic-swap-dapp-live",
"description": "Deploy the Atomic Swap dApp to VMID 5801 on Proxmox.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/deploy-atomic-swap-dapp-5801.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"healthcheck": {
"url": "https://atomic-swap.defi-oracle.io/data/live-route-registry.json",
"expect_status": 200,
"expect_body_includes": "\"liveBridgeRoutes\"",
"attempts": 10,
"delay_ms": 5000,
"timeout_ms": 15000
}
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "default",
"description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"phoenix-deploy-api/scripts/install-systemd.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"healthcheck": {
"url": "http://192.168.11.59:4001/health",
"expect_status": 200,
"expect_body_includes": "phoenix-deploy-api",
"attempts": 8,
"delay_ms": 3000,
"timeout_ms": 10000
}
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "atomic-swap-dapp-live",
"description": "Deploy the Atomic Swap dApp to VMID 5801 on Proxmox.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/deploy-atomic-swap-dapp-5801.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"healthcheck": {
"url": "https://atomic-swap.defi-oracle.io/data/live-route-registry.json",
"expect_status": 200,
"expect_body_includes": "\"liveBridgeRoutes\"",
"attempts": 10,
"delay_ms": 5000,
"timeout_ms": 15000
}
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "cloudflare-sync",
"description": "Optional: sync Cloudflare DNS from repo .env (path-gated; set PHOENIX_CLOUDFLARE_SYNC=1 on host).",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/gitea-cloudflare-sync.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"timeout_sec": 600
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "cloudflare-sync-force",
"description": "Same as cloudflare-sync but skips path filter (operator / manual).",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/gitea-cloudflare-sync.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT"
],
"timeout_sec": 600
},
{
"repo": "d-bis/proxmox",
"branch": "master",
"target": "portal-live",
"description": "Deploy the Sankofa portal to CT 7801 on Proxmox.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/sync-sankofa-portal-7801.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT",
"SANKOFA_PORTAL_SRC"
],
"healthcheck": {
"url": "http://192.168.11.51:3000/",
"expect_status": 200,
"expect_body_includes": "<html",
"attempts": 10,
"delay_ms": 5000,
"timeout_ms": 10000
}
},
{
"repo": "d-bis/CurrenciCombo",
"branch": "master",
"target": "default",
"description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
"cwd": "${PHOENIX_REPO_ROOT}",
"command": [
"bash",
"scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"
],
"required_env": [
"PHOENIX_REPO_ROOT",
"PHOENIX_DEPLOY_WORKSPACE"
],
"healthcheck": {
"url": "https://curucombo.xn--vov0g.com/api/ready",
"expect_status": 200,
"expect_body_includes": "\"ready\":true",
"attempts": 12,
"delay_ms": 5000,
"timeout_ms": 15000
}
}
]
}

View File

@@ -25,70 +25,7 @@ if [[ -f "$REPO_ROOT/config/public-sector-program-manifest.json" ]]; then
else
echo "WARN: $REPO_ROOT/config/public-sector-program-manifest.json missing — set PUBLIC_SECTOR_MANIFEST_PATH in .env"
fi
if [[ -f "$TARGET/.env" ]]; then
echo "Preserving existing $TARGET/.env"
elif [[ -f "$APP_DIR/.env" ]]; then
cp "$APP_DIR/.env" "$TARGET/.env"
elif [[ -f "$APP_DIR/.env.example" ]]; then
cp "$APP_DIR/.env.example" "$TARGET/.env"
fi
ensure_env_value() {
local key="$1"
local value="$2"
local file="$TARGET/.env"
[[ -n "$value" && -f "$file" ]] || return 0
local current=""
if grep -qE "^${key}=" "$file"; then
current="$(grep -E "^${key}=" "$file" | tail -n 1 | cut -d= -f2-)"
fi
[[ -z "$current" ]] || return 0
local tmp
tmp="$(mktemp)"
awk -v key="$key" -v value="$value" '
BEGIN { found = 0 }
$0 ~ "^" key "=" {
print key "=" value
found = 1
next
}
{ print }
END {
if (!found) print key "=" value
}
' "$file" > "$tmp"
cat "$tmp" > "$file"
rm -f "$tmp"
}
repo_env_value() {
local key="$1"
local file="$REPO_ROOT/.env"
[[ -f "$file" ]] || return 0
grep -E "^${key}=" "$file" | tail -n 1 | cut -d= -f2-
}
if [[ -f "$TARGET/.env" ]]; then
ensure_env_value PHOENIX_REPO_ROOT "$REPO_ROOT"
for key in \
GITEA_TOKEN \
PHOENIX_DEPLOY_SECRET \
PROXMOX_HOST \
PROXMOX_PORT \
PROXMOX_USER \
PROXMOX_TOKEN_NAME \
PROXMOX_TOKEN_VALUE \
PROXMOX_TLS_VERIFY \
PUBLIC_IP \
CLOUDFLARE_API_TOKEN \
CLOUDFLARE_GITEA_SYNC_ZONE \
PHOENIX_CLOUDFLARE_SYNC
do
ensure_env_value "$key" "$(repo_env_value "$key")"
done
fi
[ -f "$APP_DIR/.env" ] && cp "$APP_DIR/.env" "$TARGET/.env" || [ -f "$APP_DIR/.env.example" ] && cp "$APP_DIR/.env.example" "$TARGET/.env" || true
chown -R root:root "$TARGET"
cd "$TARGET" && npm install --omit=dev
cp "$APP_DIR/phoenix-deploy-api.service" /etc/systemd/system/

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env node
/**
* Phoenix Deploy API — Gitea webhook receiver, deploy execution API, and Phoenix API Railing (Infra/VE)
* Phoenix Deploy API — Gitea webhook receiver, deploy stub, and Phoenix API Railing (Infra/VE)
*
* Endpoints:
* POST /webhook/gitea — Receives Gitea push/tag/PR webhooks
@@ -19,9 +19,7 @@
import crypto from 'crypto';
import https from 'https';
import path from 'path';
import { promisify } from 'util';
import { execFile as execFileCallback } from 'child_process';
import { cpSync, existsSync, mkdirSync, mkdtempSync, readFileSync, readdirSync, rmSync, writeFileSync } from 'fs';
import { readFileSync, existsSync } from 'fs';
import { fileURLToPath } from 'url';
import express from 'express';
@@ -31,13 +29,6 @@ const PORT = parseInt(process.env.PORT || '4001', 10);
const GITEA_URL = (process.env.GITEA_URL || 'https://gitea.d-bis.org').replace(/\/$/, '');
const GITEA_TOKEN = process.env.GITEA_TOKEN || '';
const WEBHOOK_SECRET = process.env.PHOENIX_DEPLOY_SECRET || '';
const PHOENIX_REPO_ROOT_DEFAULT = (process.env.PHOENIX_REPO_ROOT_DEFAULT || '/srv/projects/proxmox').trim();
const ATOMIC_SWAP_REPO = (process.env.PHOENIX_ATOMIC_SWAP_REPO || 'd-bis/atomic-swap-dapp').trim();
const ATOMIC_SWAP_REF = (process.env.PHOENIX_ATOMIC_SWAP_REF || 'main').trim();
const CROSS_CHAIN_PMM_LPS_REPO = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REPO || '').trim();
const CROSS_CHAIN_PMM_LPS_REF = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REF || 'main').trim();
const SMOM_DBIS_138_REPO = (process.env.PHOENIX_SMOM_DBIS_138_REPO || '').trim();
const SMOM_DBIS_138_REF = (process.env.PHOENIX_SMOM_DBIS_138_REF || 'main').trim();
const PROXMOX_HOST = process.env.PROXMOX_HOST || '';
const PROXMOX_PORT = parseInt(process.env.PROXMOX_PORT || '8006', 10);
@@ -51,17 +42,6 @@ const PROMETHEUS_URL = (process.env.PROMETHEUS_URL || 'http://localhost:9090').r
const PHOENIX_WEBHOOK_URL = process.env.PHOENIX_WEBHOOK_URL || '';
const PHOENIX_WEBHOOK_SECRET = process.env.PHOENIX_WEBHOOK_SECRET || '';
const PARTNER_KEYS = (process.env.PHOENIX_PARTNER_KEYS || '').split(',').map((k) => k.trim()).filter(Boolean);
const WEBHOOK_DEPLOY_ENABLED = process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === '1' || process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === 'true';
const execFile = promisify(execFileCallback);
function expandEnvTokens(value, env = process.env) {
if (typeof value !== 'string') return value;
return value.replace(/\$\{([A-Z0-9_]+)\}/gi, (_, key) => env[key] || '');
}
function resolvePhoenixRepoRoot() {
return (process.env.PHOENIX_REPO_ROOT || PHOENIX_REPO_ROOT_DEFAULT || '').trim().replace(/\/$/, '');
}
/**
* Manifest resolution order:
@@ -83,395 +63,15 @@ function resolvePublicSectorManifestPath() {
return path.join(__dirname, '..', 'config', 'public-sector-program-manifest.json');
}
function resolveDeployTargetsPath() {
const override = (process.env.DEPLOY_TARGETS_PATH || '').trim();
if (override && existsSync(override)) return override;
const bundled = path.join(__dirname, 'deploy-targets.json');
if (existsSync(bundled)) return bundled;
return bundled;
}
function loadDeployTargetsConfig() {
const configPath = resolveDeployTargetsPath();
if (!existsSync(configPath)) {
return {
path: configPath,
defaults: {},
targets: [],
};
}
const raw = readFileSync(configPath, 'utf8');
const parsed = JSON.parse(raw);
return {
path: configPath,
defaults: parsed.defaults || {},
targets: Array.isArray(parsed.targets) ? parsed.targets : [],
};
}
function findDeployTarget(repo, branch, requestedTarget) {
const config = loadDeployTargetsConfig();
const wantedTarget = requestedTarget || 'default';
const match = config.targets.find((entry) => {
if (entry.repo !== repo) return false;
if ((entry.branch || 'main') !== branch) return false;
return (entry.target || 'default') === wantedTarget;
});
return { config, match, wantedTarget };
}
async function sleep(ms) {
await new Promise((resolve) => setTimeout(resolve, ms));
}
async function verifyHealthCheck(healthcheck) {
if (!healthcheck || !healthcheck.url) return null;
const attempts = Math.max(1, Number(healthcheck.attempts || 1));
const delayMs = Math.max(0, Number(healthcheck.delay_ms || 0));
const timeoutMs = Math.max(1000, Number(healthcheck.timeout_ms || 10000));
const expectedStatus = Number(healthcheck.expect_status || 200);
const expectBodyIncludes = healthcheck.expect_body_includes || '';
let lastError = null;
for (let attempt = 1; attempt <= attempts; attempt += 1) {
try {
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), timeoutMs);
const res = await fetch(healthcheck.url, { signal: controller.signal });
const body = await res.text();
clearTimeout(timeout);
if (res.status !== expectedStatus) {
throw new Error(`Expected HTTP ${expectedStatus}, got ${res.status}`);
}
if (expectBodyIncludes && !body.includes(expectBodyIncludes)) {
throw new Error(`Health body missing expected text: ${expectBodyIncludes}`);
}
return {
ok: true,
url: healthcheck.url,
status: res.status,
attempt,
};
} catch (err) {
lastError = err;
if (attempt < attempts && delayMs > 0) {
await sleep(delayMs);
}
}
}
throw new Error(`Health check failed for ${healthcheck.url}: ${lastError?.message || 'unknown error'}`);
}
async function downloadRepoArchive({ owner, repo, ref, archivePath, authToken }) {
const archiveRef = `${ref}.tar.gz`;
const url = `${GITEA_URL}/api/v1/repos/${owner}/${repo}/archive/${archiveRef}`;
const headers = {};
if (authToken) headers.Authorization = `token ${authToken}`;
const res = await fetch(url, { headers });
if (!res.ok) {
throw new Error(`Failed to download archive ${owner}/${repo}@${ref}: HTTP ${res.status}`);
}
const buffer = Buffer.from(await res.arrayBuffer());
writeFileSync(archivePath, buffer);
}
function syncExtractedTree({ sourceRoot, destRoot, entries = null }) {
mkdirSync(destRoot, { recursive: true });
const selectedEntries = Array.isArray(entries) ? entries : readdirSync(sourceRoot);
for (const entry of selectedEntries) {
const sourcePath = path.join(sourceRoot, entry);
if (!existsSync(sourcePath)) continue;
const destPath = path.join(destRoot, entry);
rmSync(destPath, { recursive: true, force: true });
cpSync(sourcePath, destPath, { recursive: true });
}
}
async function syncRepoArchive({ owner, repo, ref, destRoot, entries = null, authToken = '' }) {
const tempDir = mkdtempSync('/tmp/phoenix-archive-');
const archivePath = path.join(tempDir, 'repo.tar.gz');
const extractDir = path.join(tempDir, 'extract');
mkdirSync(extractDir, { recursive: true });
try {
await downloadRepoArchive({ owner, repo, ref, archivePath, authToken });
await execFile('tar', ['-xzf', archivePath, '-C', extractDir]);
const [rootDir] = readdirSync(extractDir);
if (!rootDir) {
throw new Error(`Archive for ${owner}/${repo}@${ref} was empty`);
}
syncExtractedTree({
sourceRoot: path.join(extractDir, rootDir),
destRoot,
entries,
});
} finally {
rmSync(tempDir, { recursive: true, force: true });
}
}
async function prepareDeployWorkspace({ repo, branch, sha, target }) {
const repoRoot = resolvePhoenixRepoRoot();
if (!repoRoot) {
throw new Error('PHOENIX_REPO_ROOT is not configured');
}
const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
const externalWorkspaceRoot = path.join(repoRoot, '.phoenix-deploy-workspaces', owner, repoName);
// Manual smoke tests can target the already-staged local workspace without
// forcing an archive sync from Gitea.
if (sha === 'HEAD' || sha === 'local') {
mkdirSync(repoRoot, { recursive: true });
if (repo !== 'd-bis/proxmox') {
mkdirSync(externalWorkspaceRoot, { recursive: true });
}
return {
PHOENIX_REPO_ROOT: repoRoot,
PROXMOX_REPO_ROOT: repoRoot,
PHOENIX_DEPLOY_WORKSPACE: repo === 'd-bis/proxmox' ? repoRoot : externalWorkspaceRoot,
};
}
const ref = sha || branch || 'main';
if (repo === 'd-bis/proxmox') {
await syncRepoArchive({
owner,
repo: repoName,
ref,
destRoot: repoRoot,
entries: ['config', 'phoenix-deploy-api', 'reports', 'scripts', 'token-lists'],
authToken: GITEA_TOKEN,
});
} else {
await syncRepoArchive({
owner,
repo: repoName,
ref,
destRoot: externalWorkspaceRoot,
authToken: GITEA_TOKEN,
});
}
if (repo === 'd-bis/proxmox' && target === 'atomic-swap-dapp-live') {
const [swapOwner, swapRepo] = ATOMIC_SWAP_REPO.includes('/')
? ATOMIC_SWAP_REPO.split('/')
: ['d-bis', ATOMIC_SWAP_REPO];
await syncRepoArchive({
owner: swapOwner,
repo: swapRepo,
ref: ATOMIC_SWAP_REF,
destRoot: path.join(repoRoot, 'atomic-swap-dapp'),
authToken: GITEA_TOKEN,
});
if (CROSS_CHAIN_PMM_LPS_REPO) {
const [lpsOwner, lpsRepo] = CROSS_CHAIN_PMM_LPS_REPO.includes('/')
? CROSS_CHAIN_PMM_LPS_REPO.split('/')
: ['d-bis', CROSS_CHAIN_PMM_LPS_REPO];
await syncRepoArchive({
owner: lpsOwner,
repo: lpsRepo,
ref: CROSS_CHAIN_PMM_LPS_REF,
destRoot: path.join(repoRoot, 'cross-chain-pmm-lps'),
authToken: GITEA_TOKEN,
});
}
if (SMOM_DBIS_138_REPO) {
const [smomOwner, smomRepo] = SMOM_DBIS_138_REPO.includes('/')
? SMOM_DBIS_138_REPO.split('/')
: ['d-bis', SMOM_DBIS_138_REPO];
await syncRepoArchive({
owner: smomOwner,
repo: smomRepo,
ref: SMOM_DBIS_138_REF,
destRoot: path.join(repoRoot, 'smom-dbis-138'),
authToken: GITEA_TOKEN,
});
}
}
return {
PHOENIX_REPO_ROOT: repoRoot,
PROXMOX_REPO_ROOT: repoRoot,
PHOENIX_DEPLOY_WORKSPACE: repo === 'd-bis/proxmox' ? repoRoot : externalWorkspaceRoot,
};
}
async function runDeployTarget(definition, configDefaults, context, envOverrides = {}) {
if (!Array.isArray(definition.command) || definition.command.length === 0) {
throw new Error('Deploy target is missing a command array');
}
const childEnv = {
...process.env,
...envOverrides,
PHOENIX_DEPLOY_REPO: context.repo,
PHOENIX_DEPLOY_BRANCH: context.branch,
PHOENIX_DEPLOY_SHA: context.sha || '',
PHOENIX_DEPLOY_TARGET: context.target,
PHOENIX_DEPLOY_TRIGGER: context.trigger,
};
const cwd = expandEnvTokens(definition.cwd || configDefaults.cwd || process.cwd(), childEnv);
const timeoutSeconds = Number(definition.timeout_sec || configDefaults.timeout_sec || 1800);
const timeout = Number.isFinite(timeoutSeconds) && timeoutSeconds > 0 ? timeoutSeconds * 1000 : 1800 * 1000;
const command = definition.command.map((part) => expandEnvTokens(part, childEnv));
const missingEnv = (definition.required_env || []).filter((key) => !childEnv[key]);
if (missingEnv.length > 0) {
throw new Error(`Missing required env for deploy target: ${missingEnv.join(', ')}`);
}
if (!existsSync(cwd)) {
throw new Error(`Deploy working directory does not exist: ${cwd}`);
}
const { stdout, stderr } = await execFile(command[0], command.slice(1), {
cwd,
env: childEnv,
timeout,
maxBuffer: 10 * 1024 * 1024,
});
const healthcheck = await verifyHealthCheck(definition.healthcheck || configDefaults.healthcheck || null);
return {
cwd,
command,
stdout: stdout || '',
stderr: stderr || '',
timeout_sec: timeoutSeconds,
healthcheck,
};
}
async function executeDeploy({ repo, branch = 'main', target = 'default', sha = '', trigger = 'api' }) {
if (!repo) {
const error = new Error('repo required');
error.statusCode = 400;
error.payload = { error: error.message };
throw error;
}
const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
const commitSha = sha || '';
const requestedTarget = target || 'default';
const { config, match, wantedTarget } = findDeployTarget(repo, branch, requestedTarget);
if (!match) {
const error = new Error('Deploy target not configured');
error.statusCode = 404;
error.payload = {
error: error.message,
repo,
branch,
target: wantedTarget,
config_path: config.path,
};
if (commitSha && GITEA_TOKEN) {
await setGiteaCommitStatus(owner, repoName, commitSha, 'failure', `No deploy target for ${repo} ${branch} ${wantedTarget}`);
}
throw error;
}
if (commitSha && GITEA_TOKEN) {
await setGiteaCommitStatus(owner, repoName, commitSha, 'pending', 'Phoenix deployment in progress');
}
console.log(`[deploy] ${repo} branch=${branch} target=${wantedTarget} sha=${commitSha} trigger=${trigger}`);
let deployResult = null;
let deployError = null;
let envOverrides = {};
try {
envOverrides = await prepareDeployWorkspace({
repo,
branch,
sha: commitSha,
target: wantedTarget,
});
deployResult = await runDeployTarget(match, config.defaults, {
repo,
branch,
sha: commitSha,
target: wantedTarget,
trigger,
}, envOverrides);
if (commitSha && GITEA_TOKEN) {
await setGiteaCommitStatus(owner, repoName, commitSha, 'success', `Deployed to ${wantedTarget}`);
}
return {
status: 'completed',
repo,
branch,
target: wantedTarget,
config_path: config.path,
command: deployResult.command,
cwd: deployResult.cwd,
stdout: deployResult.stdout,
stderr: deployResult.stderr,
healthcheck: deployResult.healthcheck,
};
} catch (err) {
deployError = err;
if (commitSha && GITEA_TOKEN) {
await setGiteaCommitStatus(owner, repoName, commitSha, 'failure', `Deploy failed: ${err.message.slice(0, 120)}`);
}
err.statusCode = err.statusCode || 500;
err.payload = err.payload || {
error: err.message,
repo,
branch,
target: wantedTarget,
config_path: config.path,
};
throw err;
} finally {
if (PHOENIX_WEBHOOK_URL) {
const payload = {
event: 'deploy.completed',
repo,
branch,
target: wantedTarget,
sha: commitSha,
success: Boolean(deployResult),
command: deployResult?.command,
cwd: deployResult?.cwd,
phoenix_repo_root: envOverrides.PHOENIX_REPO_ROOT || null,
error: deployError?.message || null,
};
const body = JSON.stringify(payload);
const sig = crypto.createHmac('sha256', PHOENIX_WEBHOOK_SECRET || '').update(body).digest('hex');
fetch(PHOENIX_WEBHOOK_URL, {
method: 'POST',
headers: { 'Content-Type': 'application/json', 'X-Phoenix-Signature': `sha256=${sig}` },
body,
}).catch((e) => console.error('[webhook] outbound failed', e.message));
}
}
}
const httpsAgent = new https.Agent({ rejectUnauthorized: process.env.PROXMOX_TLS_VERIFY !== '0' });
function formatProxmoxAuthHeader(user, tokenName, tokenValue) {
if (tokenName.includes('!')) {
return `PVEAPIToken=${tokenName}=${tokenValue}`;
}
return `PVEAPIToken=${user}!${tokenName}=${tokenValue}`;
}
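// Examples (illustrative values):
//   formatProxmoxAuthHeader('root@pam', 'deploy', 'abc')          -> 'PVEAPIToken=root@pam!deploy=abc'
//   formatProxmoxAuthHeader('root@pam', 'root@pam!deploy', 'abc') -> 'PVEAPIToken=root@pam!deploy=abc'
// i.e. a token name that already embeds user!name is used verbatim, so both env
// conventions yield the same header.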
async function proxmoxRequest(endpoint, method = 'GET', body = null) {
const baseUrl = `https://${PROXMOX_HOST}:${PROXMOX_PORT}/api2/json`;
const url = `${baseUrl}${endpoint}`;
const options = {
method,
headers: {
Authorization: formatProxmoxAuthHeader(PROXMOX_USER, PROXMOX_TOKEN_NAME, PROXMOX_TOKEN_VALUE),
'Content-Type': 'application/json',
},
agent: httpsAgent,
@@ -562,44 +162,12 @@ app.post('/webhook/gitea', async (req, res) => {
if (action === 'push' || (action === 'synchronize' && payload.pull_request)) {
if (branch === 'main' || branch === 'master' || ref.startsWith('refs/tags/')) {
if (!WEBHOOK_DEPLOY_ENABLED) {
return res.status(200).json({
received: true,
repo: fullName,
branch,
sha,
deployed: false,
message: 'Webhook accepted; set PHOENIX_WEBHOOK_DEPLOY_ENABLED=1 to execute deploys from webhook events.',
});
}
try {
const result = await executeDeploy({
repo: fullName,
branch,
sha,
target: 'default',
trigger: 'webhook',
});
return res.status(200).json({
received: true,
repo: fullName,
branch,
sha,
deployed: true,
result,
});
} catch (err) {
return res.status(200).json({
received: true,
repo: fullName,
branch,
sha,
deployed: false,
error: err.message,
details: err.payload || null,
});
}
}
}
@@ -617,36 +185,47 @@ app.post('/api/deploy', async (req, res) => {
}
const { repo, branch = 'main', target, sha } = req.body;
try {
const result = await executeDeploy({
repo,
branch,
sha,
target,
trigger: 'api',
});
res.status(200).json(result);
} catch (err) {
res.status(err.statusCode || 500).json(err.payload || { error: err.message });
}
});
app.get('/api/deploy-targets', (req, res) => {
const config = loadDeployTargetsConfig();
const targets = config.targets.map((entry) => ({
repo: entry.repo,
branch: entry.branch || 'main',
target: entry.target || 'default',
description: entry.description || '',
cwd: entry.cwd || config.defaults.cwd || '',
command: entry.command || [],
has_healthcheck: Boolean(entry.healthcheck || config.defaults.healthcheck),
}));
res.json({
config_path: config.path,
count: targets.length,
targets,
});
});
/**
@@ -895,10 +474,7 @@ app.listen(PORT, () => {
if (!GITEA_TOKEN) console.warn('GITEA_TOKEN not set — commit status updates disabled');
if (!hasProxmox) console.warn('PROXMOX_* not set — Infra/VE API returns stub data');
if (PHOENIX_WEBHOOK_URL) console.log('Outbound webhook enabled:', PHOENIX_WEBHOOK_URL);
if (WEBHOOK_DEPLOY_ENABLED) console.log('Inbound webhook deploy execution enabled');
if (PARTNER_KEYS.length > 0) console.log('Partner API key auth enabled for /api/v1/* (except GET /api/v1/public-sector/programs)');
const mpath = resolvePublicSectorManifestPath();
const dpath = resolveDeployTargetsPath();
console.log(`Public-sector manifest: ${mpath} (${existsSync(mpath) ? 'ok' : 'missing'})`);
console.log(`Deploy targets: ${dpath} (${existsSync(dpath) ? 'ok' : 'missing'})`);
});
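// Example manual trigger (illustrative; host/port assumed from the dev-VM health
// check at :4001, and any auth this deployment enforces must be added):
//   curl -X POST http://<dev-vm>:4001/api/deploy \
//     -H 'Content-Type: application/json' \
//     -d '{"repo":"d-bis/proxmox","branch":"main","target":"default"}'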

View File

@@ -0,0 +1,259 @@
{
"rows": [
{
"group": "dodo_v3_core",
"label": "D3Oracle",
"address": "0xD7459aEa8bB53C83a1e90262777D730539A326F0",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "D3Oracle",
"compilerVersion": "v0.8.16+commit.07a7930e"
},
{
"group": "dodo_v3_core",
"label": "D3Vault",
"address": "0x42b6867260Fb9eE6d09B7E0233A1fAD65D0133D1",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "D3Vault",
"compilerVersion": "v0.8.16+commit.07a7930e"
},
{
"group": "dodo_v3_core",
"label": "DODOApprove",
"address": "0xbF8D5CB7E8F333CA686a27374Ae06F5dfd772E9E",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "DODOApprove",
"compilerVersion": "v0.8.16+commit.07a7930e"
},
{
"group": "dodo_v3_core",
"label": "DODOApproveProxy",
"address": "0x08d764c03C42635d8ef9046752b5694243E21Fe9",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "DODOApproveProxy",
"compilerVersion": "v0.8.16+commit.07a7930e"
},
{
"group": "dodo_v3_core",
"label": "D3MMFactory",
"address": "0x78470C7d2925B6738544E2DD4FE7c07CcA21AC31",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "D3MMFactory",
"compilerVersion": "v0.8.16+commit.07a7930e"
},
{
"group": "dodo_v3_core",
"label": "D3Proxy",
"address": "0xc9a11abB7C63d88546Be24D58a6d95e3762cB843",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "D3Proxy",
"compilerVersion": "v0.8.16+commit.07a7930e"
},
{
"group": "flash_infra",
"label": "UniversalCCIPFlashBridgeAdapter",
"address": "0xBe9e0B2d4cF6A3b2994d6f2f0904D2B165eB8ffC",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "UniversalCCIPFlashBridgeAdapter",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "flash_infra",
"label": "CrossChainFlashRepayReceiver",
"address": "0xD084b68cB4B1ef2cBA09CF99FB1B6552fd9b4859",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "CrossChainFlashRepayReceiver",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "flash_infra",
"label": "CrossChainFlashVaultCreditReceiver",
"address": "0x89F7a1fcbBe104BeE96Da4b4b6b7d3AF85f7E661",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "CrossChainFlashVaultCreditReceiver",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "native_v2",
"label": "UniswapV2Factory",
"address": "0x0C30F6e67Ab3667fCc2f5CEA8e274ef1FB920279",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "UniswapV2Factory",
"compilerVersion": "v0.5.16+commit.9c3226ce"
},
{
"group": "native_v2",
"label": "UniswapV2Router",
"address": "0x3019A7fDc76ba7F64F18d78e66842760037ee638",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "UniswapV2Router02",
"compilerVersion": "v0.6.6+commit.6c089d02"
},
{
"group": "native_v2",
"label": "SushiSwapFactory",
"address": "0x2871207ff0d56089D70c0134d33f1291B6Fce0BE",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "UniswapV2Factory",
"compilerVersion": "v0.6.12+commit.27d51765"
},
{
"group": "native_v2",
"label": "SushiSwapRouter",
"address": "0xB37b93D38559f53b62ab020A14919f2630a1aE34",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "UniswapV2Router02",
"compilerVersion": "v0.6.12+commit.27d51765"
},
{
"group": "route_execution_stack",
"label": "EnhancedSwapRouterV2",
"address": "0xF1c93F54A5C2fc0d7766Ccb0Ad8f157DFB4C99Ce",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "EnhancedSwapRouterV2",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "IntentBridgeCoordinatorV2",
"address": "0x7D0022B7e8360172fd9C0bB6778113b7Ea3674E7",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "IntentBridgeCoordinatorV2",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "DodoRouteExecutorAdapter",
"address": "0x88495B3dccEA93b0633390fDE71992683121Fa62",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "DodoRouteExecutorAdapter",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "DodoV3RouteExecutorAdapter",
"address": "0x9Cb97adD29c52e3B81989BcA2E33D46074B530eF",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "DodoV3RouteExecutorAdapter",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "UniswapV3RouteExecutorAdapter",
"address": "0x960D6db4E78705f82995690548556fb2266308EA",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "UniswapV3RouteExecutorAdapter",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "BalancerRouteExecutorAdapter",
"address": "0x4E1B71B69188Ab45021c797039b4887a4924157A",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "BalancerRouteExecutorAdapter",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "CurveRouteExecutorAdapter",
"address": "0x5f0E07071c41ACcD2A1b1032D3bd49b323b9ADE6",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "CurveRouteExecutorAdapter",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "OneInchRouteExecutorAdapter",
"address": "0x8168083d29b3293F215392A49D16e7FeF4a02600",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "OneInchRouteExecutorAdapter",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "PilotUniswapV3Router",
"address": "0xD164D9cCfAcf5D9F91698f296aE0cd245D964384",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "Chain138PilotUniswapV3Router",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "PilotBalancerVault",
"address": "0x96423d7C1727698D8a25EbFB88131e9422d1a3C3",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "Chain138PilotBalancerVault",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "PilotCurve3Pool",
"address": "0xE440Ec15805BE4C7BabCD17A63B8C8A08a492e0f",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "Chain138PilotCurve3Pool",
"compilerVersion": "v0.8.20+commit.a1b79de6"
},
{
"group": "route_execution_stack",
"label": "PilotOneInchRouter",
"address": "0x500B84b1Bc6F59C1898a5Fe538eA20A758757A4F",
"deployed": true,
"verificationState": "verified",
"blockscoutName": "Chain138PilotOneInchAggregationRouter",
"compilerVersion": "v0.8.20+commit.a1b79de6"
}
],
"summary": {
"dodo_v3_core": {
"total": 6,
"deployed": 6,
"verified": 6,
"bytecode_only": 0,
"pending": 0
},
"flash_infra": {
"total": 3,
"deployed": 3,
"verified": 3,
"bytecode_only": 0,
"pending": 0
},
"native_v2": {
"total": 4,
"deployed": 4,
"verified": 4,
"bytecode_only": 0,
"pending": 0
},
"route_execution_stack": {
"total": 12,
"deployed": 12,
"verified": 12,
"bytecode_only": 0,
"pending": 0
}
}
}

View File

@@ -1,152 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SUBMODULE_ROOT="$PROJECT_ROOT/atomic-swap-dapp"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_DAPP_HOST:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
VMID="${VMID:-5801}"
DEPLOY_ROOT="${DEPLOY_ROOT:-/var/www/atomic-swap}"
TMP_ARCHIVE="/tmp/atomic-swap-dapp-5801.tgz"
DIST_DIR="$SUBMODULE_ROOT/dist"
SKIP_BUILD="${SKIP_BUILD:-0}"
SSH_OPTS="${SSH_OPTS:--o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new}"
cleanup() {
rm -f "$TMP_ARCHIVE"
}
trap cleanup EXIT
if [ ! -d "$SUBMODULE_ROOT" ]; then
echo "Missing submodule at $SUBMODULE_ROOT" >&2
exit 1
fi
cd "$SUBMODULE_ROOT"
if [ "$SKIP_BUILD" != "1" ]; then
if [ -f package-lock.json ]; then
npm ci >/dev/null
else
npm install >/dev/null
fi
npm run sync:ecosystem >/dev/null
npm run validate:manifest >/dev/null
npm run build >/dev/null
fi
for required_path in \
"$DIST_DIR/index.html" \
"$DIST_DIR/data/ecosystem-manifest.json" \
"$DIST_DIR/data/live-route-registry.json" \
"$DIST_DIR/data/deployed-venue-inventory.json"; do
if [ ! -f "$required_path" ]; then
echo "Missing required build artifact: $required_path" >&2
exit 1
fi
done
jq -e '.supportedNetworks[] | select(.chainId == 138) | .deployedVenuePoolCount >= 19 and .publicRoutingPoolCount >= 19' \
"$DIST_DIR/data/ecosystem-manifest.json" >/dev/null
jq -e '.liveSwapRoutes | length >= 19' "$DIST_DIR/data/live-route-registry.json" >/dev/null
jq -e '.liveBridgeRoutes | length >= 12' "$DIST_DIR/data/live-route-registry.json" >/dev/null
jq -e '.networks[] | select(.chainId == 138) | .venueCounts.deployedVenuePoolCount >= 19 and .summary.totalVenues >= 19' \
"$DIST_DIR/data/deployed-venue-inventory.json" >/dev/null
rm -f "$TMP_ARCHIVE"
tar -C "$SUBMODULE_ROOT" -czf "$TMP_ARCHIVE" dist
ssh $SSH_OPTS "root@$PROXMOX_HOST" true
scp -q $SSH_OPTS "$TMP_ARCHIVE" "root@$PROXMOX_HOST:/tmp/atomic-swap-dapp-5801.tgz"
ssh $SSH_OPTS "root@$PROXMOX_HOST" "
set -euo pipefail
pct push $VMID /tmp/atomic-swap-dapp-5801.tgz /tmp/atomic-swap-dapp-5801.tgz
pct exec $VMID -- bash -lc '
set -euo pipefail
mkdir -p \"$DEPLOY_ROOT\"
find \"$DEPLOY_ROOT\" -mindepth 1 -maxdepth 1 -exec rm -rf {} +
rm -rf /tmp/dist
tar -xzf /tmp/atomic-swap-dapp-5801.tgz -C /tmp
cp -R /tmp/dist/. \"$DEPLOY_ROOT/\"
mkdir -p /var/cache/nginx/atomic-swap-api
cat > /etc/nginx/conf.d/atomic-swap-api-cache.conf <<\"EOF\"
proxy_cache_path /var/cache/nginx/atomic-swap-api
levels=1:2
keys_zone=atomic_swap_api_cache:10m
max_size=256m
inactive=30m
use_temp_path=off;
EOF
cat > /etc/nginx/sites-available/atomic-swap <<\"EOF\"
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
root $DEPLOY_ROOT;
index index.html;
location / {
try_files \$uri \$uri/ /index.html;
}
location = /index.html {
add_header Cache-Control \"no-store, no-cache, must-revalidate\" always;
}
location /data/ {
add_header Cache-Control \"no-store, no-cache, must-revalidate\" always;
}
location /assets/ {
add_header Cache-Control \"public, max-age=31536000, immutable\" always;
}
location /api/v1/ {
proxy_pass https://explorer.d-bis.org/api/v1/;
proxy_ssl_server_name on;
proxy_set_header Host explorer.d-bis.org;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host \$host;
proxy_http_version 1.1;
proxy_buffering on;
proxy_cache atomic_swap_api_cache;
proxy_cache_methods GET HEAD;
proxy_cache_key \"\$scheme\$proxy_host\$request_uri\";
proxy_cache_lock on;
proxy_cache_lock_timeout 10s;
proxy_cache_lock_age 10s;
proxy_cache_background_update on;
proxy_cache_revalidate on;
proxy_cache_valid 200 10s;
proxy_cache_valid 404 1s;
proxy_cache_valid any 0;
proxy_cache_use_stale error timeout invalid_header updating http_429 http_500 http_502 http_503 http_504;
add_header X-Atomic-Swap-Cache \$upstream_cache_status always;
}
}
EOF
ln -sfn /etc/nginx/sites-available/atomic-swap /etc/nginx/sites-enabled/atomic-swap
rm -f /etc/nginx/sites-enabled/default
rm -f /etc/nginx/sites-enabled/dapp
nginx -t
systemctl reload nginx
curl -fsS http://127.0.0.1/index.html >/dev/null
curl -fsS http://127.0.0.1/data/ecosystem-manifest.json >/dev/null
curl -fsS http://127.0.0.1/data/live-route-registry.json >/dev/null
curl -fsS http://127.0.0.1/data/deployed-venue-inventory.json >/dev/null
rm -rf /tmp/dist /tmp/atomic-swap-dapp-5801.tgz
'
rm -f /tmp/atomic-swap-dapp-5801.tgz
"
curl -fsS https://atomic-swap.defi-oracle.io/ >/dev/null
curl -fsS https://atomic-swap.defi-oracle.io/data/ecosystem-manifest.json | jq -e '.supportedNetworks[] | select(.chainId == 138) | .deployedVenuePoolCount >= 19 and .publicRoutingPoolCount >= 19' >/dev/null
curl -fsS https://atomic-swap.defi-oracle.io/data/live-route-registry.json | jq -e '.liveSwapRoutes | length >= 19' >/dev/null
curl -fsS https://atomic-swap.defi-oracle.io/data/live-route-registry.json | jq -e '.liveBridgeRoutes | length >= 12' >/dev/null
curl -fsS https://atomic-swap.defi-oracle.io/data/deployed-venue-inventory.json | jq -e '.networks[] | select(.chainId == 138) | .venueCounts.deployedVenuePoolCount >= 19 and .summary.totalVenues >= 19' >/dev/null
echo "Deployed atomic-swap-dapp to VMID $VMID via $PROXMOX_HOST"

View File

@@ -0,0 +1,130 @@
#!/usr/bin/env bash
# Deploy Phoenix Deploy API to the dev VM (canonical: VMID 5700, IP_DEV_VM).
# Installs to /opt/phoenix-deploy-api and enables systemd (see phoenix-deploy-api/scripts/install-systemd.sh).
#
# Layout on the workstation: repo root must contain phoenix-deploy-api/ and
# config/public-sector-program-manifest.json (copied into /opt by install-systemd).
# phoenix-deploy-api/.env is not committed; place it in your tree before deploying and it is packed into the bundle.
#
# Requires: LAN SSH to the Proxmox node that hosts VMID 5700 (see get_host_for_vmid in
# scripts/lib/load-project-env.sh). Default PVE: r630-04 for 5700.
#
# Usage:
# ./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --dry-run
# ./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --apply
# ./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --apply --start-ct # pct start 5700 on PVE if stopped
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || {
echo "ERROR: load-project-env.sh not found at ${PROJECT_ROOT}/scripts/lib/load-project-env.sh" >&2
exit 1
}
VMID="${PHOENIX_DEPLOY_DEV_VM_VMID:-5700}"
PVE_HOST="${PHOENIX_DEPLOY_PVE_HOST:-$(get_host_for_vmid "$VMID")}"
PVE_USER="${PHOENIX_DEPLOY_PVE_USER:-root}"
SSH_OPTS="${PHOENIX_DEPLOY_SSH_OPTS:--o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new}"
IP_DEV_VM="${IP_DEV_VM:-192.168.11.59}"
DRY_RUN=1
START_CT=0
for a in "$@"; do
if [[ "$a" == "--apply" ]]; then DRY_RUN=0; fi
if [[ "$a" == "--dry-run" ]]; then DRY_RUN=1; fi
if [[ "$a" == "--start-ct" ]]; then START_CT=1; fi
done
MANIFEST="${PROJECT_ROOT}/config/public-sector-program-manifest.json"
if [[ ! -f "$MANIFEST" ]]; then
echo "WARN: missing ${MANIFEST} — install on CT will warn; add file or fix path." >&2
fi
if [[ ! -d "${PROJECT_ROOT}/phoenix-deploy-api" ]]; then
echo "ERROR: ${PROJECT_ROOT}/phoenix-deploy-api not found." >&2
exit 1
fi
echo "=============================================="
echo "Phoenix Deploy API → dev VM"
echo " VMID: $VMID (expected IP: $IP_DEV_VM)"
echo " PVE host: ${PVE_USER}@${PVE_HOST}"
echo " Dry-run: $DRY_RUN"
echo "=============================================="
REMOTE_TAR="/tmp/pda-deploy-bundle.tar.gz"
STAGE="/tmp/proxmox-pda-stage"
remote_block() {
# shellcheck disable=SC2029
ssh $SSH_OPTS "${PVE_USER}@${PVE_HOST}" "$@"
}
if [[ "$DRY_RUN" -eq 1 ]]; then
echo "Dry-run only. Would:"
echo " 1. tar czf (phoenix-deploy-api + config/public-sector-program-manifest.json)"
echo " 2. scp bundle → ${PVE_USER}@${PVE_HOST}:${REMOTE_TAR}"
echo " 3. pct push ${VMID} … /root/pda-deploy.tar.gz && pct exec ${VMID} -- install-systemd.sh"
echo " 4. curl http://${IP_DEV_VM}:4001/health"
echo "Optional: --start-ct starts VMID ${VMID} on ${PVE_HOST} if it is stopped (pct must target a running CT)."
echo "Re-run with --apply to execute."
exit 0
fi
TMP_TAR="$(mktemp /tmp/pda-deploy-XXXXXX.tar.gz)"
cleanup() { rm -f "$TMP_TAR"; }
trap cleanup EXIT
cd "$PROJECT_ROOT"
tar czf "$TMP_TAR" phoenix-deploy-api config/public-sector-program-manifest.json
ensure_ct_running() {
if remote_block "pct exec ${VMID} -- true 2>/dev/null"; then
return 0
fi
echo "CT ${VMID} is not running or not reachable (pct exec failed)." >&2
if [[ "$START_CT" -eq 1 ]]; then
echo "Starting CT ${VMID} on ${PVE_HOST} (--start-ct)..."
if ! remote_block "pct start ${VMID}"; then
echo "pct start failed — CT may not exist on this node. Find VMID: ssh ${PVE_USER}@${PVE_HOST} \"pct list\"" >&2
echo "Override: PHOENIX_DEPLOY_PVE_HOST=<node-ip> PHOENIX_DEPLOY_DEV_VM_VMID=<id> $0 --apply" >&2
exit 1
fi
sleep 3
if ! remote_block "pct exec ${VMID} -- true 2>/dev/null"; then
echo "CT ${VMID} still not reachable after start." >&2
exit 1
fi
return 0
fi
echo "Start the dev VM first, e.g. on ${PVE_HOST}: pct start ${VMID}" >&2
echo "Or re-run with --apply --start-ct (scoped to this script only)." >&2
exit 1
}
run_deploy() {
ensure_ct_running
echo "[1/3] Upload bundle to PVE..."
scp $SSH_OPTS "$TMP_TAR" "${PVE_USER}@${PVE_HOST}:${REMOTE_TAR}"
echo "[2/3] pct push → CT ${VMID}, extract, install-systemd..."
remote_block bash -s <<REMOTE_EOF
set -euo pipefail
pct push ${VMID} ${REMOTE_TAR} /root/pda-deploy.tar.gz
pct exec ${VMID} -- bash -c "set -euo pipefail; rm -rf ${STAGE}; mkdir -p ${STAGE}; tar xzf /root/pda-deploy.tar.gz -C ${STAGE}; cd ${STAGE} && bash phoenix-deploy-api/scripts/install-systemd.sh; rm -f /root/pda-deploy.tar.gz"
rm -f ${REMOTE_TAR}
REMOTE_EOF
echo "[3/3] Health check on dev VM (LAN)..."
if command -v curl >/dev/null 2>&1; then
curl -sS --max-time 10 -o /dev/null -w " http://${IP_DEV_VM}:4001/health → HTTP %{http_code}\n" "http://${IP_DEV_VM}:4001/health" || echo " (curl failed — check firewall or service)"
else
echo " (curl not installed locally; skip health check)"
fi
}
run_deploy
echo "Done."

View File

@@ -1,244 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/scripts/lib/load-project-env.sh"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
PHOENIX_DEPLOY_WORKSPACE="${PHOENIX_DEPLOY_WORKSPACE:-}"
PROXMOX_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-root}"
VMID="${CURRENCICOMBO_PHOENIX_VMID:-8604}"
CT_IP="${IP_CURRENCICOMBO_PHOENIX:-10.160.0.14}"
CT_REPO_DIR="${CT_REPO_DIR:-/var/lib/currencicombo/repo}"
PUBLIC_URL="${PUBLIC_URL:-https://curucombo.xn--vov0g.com}"
PUBLIC_DOMAIN="${PUBLIC_DOMAIN:-curucombo.xn--vov0g.com}"
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS:-192.168.11.167}:81}"
NPM_EMAIL="${NPM_EMAIL:-}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
DRY_RUN=0
usage() {
cat <<'USAGE'
Usage: phoenix-deploy-currencicombo-from-workspace.sh [--dry-run]
Requires:
PHOENIX_DEPLOY_WORKSPACE Full staged CurrenciCombo checkout prepared by phoenix-deploy-api
This script:
1. Packs the staged repo workspace.
2. Pushes it into CT 8604 on r630-01.
3. Ensures host prerequisites, install.sh, prune cron, and deploy script run in-CT.
4. Updates the public NPMplus host so /api/* preserves the full path and supports SSE.
5. Verifies the public portal + /api/ready end to end.
USAGE
}
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run) DRY_RUN=1; shift ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown arg: $1" >&2; usage; exit 2 ;;
esac
done
log() { printf '[currencicombo-phoenix] %s\n' "$*" >&2; }
die() { printf '[currencicombo-phoenix][FATAL] %s\n' "$*" >&2; exit 1; }
run() { if [[ "$DRY_RUN" -eq 1 ]]; then printf '[dry-run] %s\n' "$*" >&2; else eval "$*"; fi; }
need_cmd() { command -v "$1" >/dev/null 2>&1 || die "missing required command: $1"; }
for cmd in ssh scp tar curl jq mktemp; do
need_cmd "$cmd"
done
[[ -n "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "PHOENIX_DEPLOY_WORKSPACE is required"
[[ -d "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "staged workspace missing: $PHOENIX_DEPLOY_WORKSPACE"
if [[ "$DRY_RUN" -eq 0 ]]; then
[[ -n "$NPM_EMAIL" ]] || die "NPM_EMAIL is required"
[[ -n "$NPM_PASSWORD" ]] || die "NPM_PASSWORD is required"
fi
SSH_TARGET="${PROXMOX_SSH_USER}@${PROXMOX_HOST}"
SSH_OPTS=(-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)
TMP_DIR="$(mktemp -d /tmp/currencicombo-phoenix-XXXXXX)"
ARCHIVE_PATH="${TMP_DIR}/currencicombo-workspace.tgz"
REMOTE_ARCHIVE="/tmp/$(basename "$ARCHIVE_PATH")"
CT_ARCHIVE="/root/$(basename "$ARCHIVE_PATH")"
NPM_COOKIE_JAR="${TMP_DIR}/npm-cookies.txt"
cleanup() {
rm -rf "$TMP_DIR"
}
trap cleanup EXIT
ssh_remote() {
local cmd="$1"
if [[ "$DRY_RUN" -eq 1 ]]; then
printf '[dry-run] ssh %q %q\n' "$SSH_TARGET" "$cmd" >&2
else
ssh "${SSH_OPTS[@]}" "$SSH_TARGET" "$cmd"
fi
}
pct_exec_script() {
local local_script="$1"
local remote_script
local ct_script
remote_script="/tmp/$(basename "$local_script")"
ct_script="/root/$(basename "$local_script")"
run "scp ${SSH_OPTS[*]} '$local_script' '${SSH_TARGET}:${remote_script}'"
ssh_remote "pct push ${VMID} '${remote_script}' '${ct_script}' --perms 0755 && rm -f '${remote_script}' && pct exec ${VMID} -- bash '${ct_script}' && pct exec ${VMID} -- rm -f '${ct_script}'"
}
log "packing staged workspace from ${PHOENIX_DEPLOY_WORKSPACE}"
run "tar -C '$PHOENIX_DEPLOY_WORKSPACE' --exclude='.git' --exclude='node_modules' --exclude='dist' --exclude='orchestrator/node_modules' --exclude='orchestrator/dist' -czf '$ARCHIVE_PATH' ."
log "ensuring CT ${VMID} is running on ${PROXMOX_HOST}"
ssh_remote "pct start ${VMID} >/dev/null 2>&1 || true"
log "uploading staged archive to CT ${VMID}"
run "scp ${SSH_OPTS[*]} '$ARCHIVE_PATH' '${SSH_TARGET}:${REMOTE_ARCHIVE}'"
ssh_remote "pct push ${VMID} '${REMOTE_ARCHIVE}' '${CT_ARCHIVE}' && rm -f '${REMOTE_ARCHIVE}'"
CT_SCRIPT="${TMP_DIR}/currencicombo-ct-deploy.sh"
cat > "$CT_SCRIPT" <<'EOF'
#!/usr/bin/env bash
set -euo pipefail
export DEBIAN_FRONTEND=noninteractive
ARCHIVE_PATH="__CT_ARCHIVE__"
REPO_DIR="__CT_REPO_DIR__"
need_pkg() {
dpkg -s "$1" >/dev/null 2>&1
}
apt-get update -qq
for pkg in ca-certificates curl git jq postgresql redis-server rsync build-essential; do
need_pkg "$pkg" || apt-get install -y -qq "$pkg"
done
if ! command -v node >/dev/null 2>&1 || ! node -v 2>/dev/null | grep -q '^v20\.'; then
curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
apt-get install -y -qq nodejs
fi
systemctl enable --now postgresql >/dev/null 2>&1 || true
systemctl enable --now redis-server >/dev/null 2>&1 || true
if [[ ! -f /root/currencicombo-prephoenix-archive.tgz && -d /opt/currencicombo ]]; then
tar -czf /root/currencicombo-prephoenix-archive.tgz /opt/currencicombo /etc/currencicombo 2>/dev/null || true
fi
install -d -o root -g root -m 0755 "$(dirname "$REPO_DIR")"
rm -rf "$REPO_DIR"
mkdir -p "$REPO_DIR"
tar -xzf "$ARCHIVE_PATH" -C "$REPO_DIR"
rm -f "$ARCHIVE_PATH"
bash "$REPO_DIR/scripts/deployment/install.sh"
bash "$REPO_DIR/scripts/deployment/install-prune-cron.sh"
CC_GIT_REF=local bash "$REPO_DIR/scripts/deployment/deploy-currencicombo-8604.sh"
systemctl is-active currencicombo-orchestrator.service currencicombo-webapp.service
curl -fsS http://127.0.0.1:8080/ready
curl -fsS http://127.0.0.1:3000/ >/dev/null
EOF
perl -0pi -e "s|__CT_ARCHIVE__|${CT_ARCHIVE//|/\\|}|g; s|__CT_REPO_DIR__|${CT_REPO_DIR//|/\\|}|g" "$CT_SCRIPT"
log "running install + deploy inside CT ${VMID}"
pct_exec_script "$CT_SCRIPT"
if [[ "$DRY_RUN" -eq 0 ]]; then
log "updating NPMplus proxy host for ${PUBLIC_DOMAIN}"
AUTH_JSON="$(jq -nc --arg identity "$NPM_EMAIL" --arg secret "$NPM_PASSWORD" '{identity:$identity,secret:$secret}')"
TOKEN_RESPONSE="$(curl -sk -X POST "$NPM_URL/api/tokens" -H 'Content-Type: application/json' -d "$AUTH_JSON" -c "$NPM_COOKIE_JAR")"
TOKEN="$(echo "$TOKEN_RESPONSE" | jq -r '.token // .accessToken // .access_token // .data.token // empty' 2>/dev/null)"
USE_COOKIE_AUTH=0
if [[ -z "$TOKEN" || "$TOKEN" == "null" ]]; then
if echo "$TOKEN_RESPONSE" | jq -e '.expires' >/dev/null 2>&1; then
USE_COOKIE_AUTH=1
else
die "NPMplus authentication failed"
fi
fi
npm_api() {
if [[ "$USE_COOKIE_AUTH" -eq 1 ]]; then
curl -sk -b "$NPM_COOKIE_JAR" "$@"
else
curl -sk -H "Authorization: Bearer $TOKEN" "$@"
fi
}
HOSTS_JSON="$(npm_api -X GET "$NPM_URL/api/nginx/proxy-hosts")"
HOST_ID="$(echo "$HOSTS_JSON" | jq -r --arg domain "$PUBLIC_DOMAIN" '
(if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
| map(select(.domain_names | type == "array"))
| map(select(any(.domain_names[]; . == $domain)))
| .[0].id // empty
')"
[[ -n "$HOST_ID" ]] || die "NPMplus proxy host not found for ${PUBLIC_DOMAIN}"
ADVANCED_CONFIG="$(cat <<CFG
location ^~ /api/ {
proxy_pass http://${CT_IP}:8080;
proxy_http_version 1.1;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header Connection \"\";
proxy_buffering off;
proxy_cache off;
proxy_read_timeout 24h;
proxy_send_timeout 24h;
add_header Cache-Control \"no-cache\";
}
CFG
)"
PAYLOAD="$(echo "$HOSTS_JSON" | jq -c --arg domain "$PUBLIC_DOMAIN" --arg host "$CT_IP" --arg advanced "$ADVANCED_CONFIG" '
(if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
| map(select(.domain_names | type == "array"))
| map(select(any(.domain_names[]; . == $domain)))
| .[0]
| {
domain_names,
forward_scheme: (.forward_scheme // "http"),
forward_host: $host,
forward_port: 3000,
access_list_id,
certificate_id,
ssl_forced,
caching_enabled,
block_exploits,
advanced_config: $advanced,
allow_websocket_upgrade,
http2_support,
hsts_enabled,
hsts_subdomains,
enabled
}
')"
[[ -n "$PAYLOAD" && "$PAYLOAD" != "null" ]] || die "failed to build NPMplus update payload"
UPDATE_RESPONSE="$(npm_api -X PUT "$NPM_URL/api/nginx/proxy-hosts/${HOST_ID}" -H 'Content-Type: application/json' -d "$PAYLOAD")"
echo "$UPDATE_RESPONSE" | jq -e '.id != null' >/dev/null 2>&1 || die "NPMplus proxy host update failed"
log "running public smoke checks"
HEADERS="$(curl -skI "$PUBLIC_URL/")"
echo "$HEADERS" | grep -q '^HTTP/2 200' || die "public root is not HTTP 200"
if echo "$HEADERS" | grep -qi '^x-nextjs-prerender:'; then
die "old Next.js headers still present on public root"
fi
curl -sk "$PUBLIC_URL/" | grep -F '<title>Solace Bank Group PLC — Treasury Management Portal</title>' >/dev/null || die "public title mismatch"
READY_BODY="$(curl -sk "$PUBLIC_URL/api/ready")"
echo "$READY_BODY" | grep -F '"ready":true' >/dev/null || die "public /api/ready failed"
curl -skN --max-time 5 -H 'Accept: text/event-stream' "$PUBLIC_URL/api/plans/demo-pay-014/status/stream" | grep -F '"type":"connected"' >/dev/null || die "public SSE smoke failed"
log "capturing EXT-* blocker summary"
ssh_remote "pct exec ${VMID} -- journalctl -u currencicombo-orchestrator.service -n 200 --no-pager | grep -E 'ExternalBlockers|EXT-' || true"
fi
log "CurrenciCombo Phoenix deploy completed from ${PHOENIX_DEPLOY_WORKSPACE}"

View File

@@ -131,6 +131,7 @@ export DBIS_CORE_DIR="${DBIS_CORE_DIR:-${PROJECT_ROOT}/dbis_core}"
# Covers: DBIS (101xx), RPC (2101-2103, 2201, 2301, etc.), Blockscout (5000), CCIP (5400-5476), NPMplus (10233, 10234), Sankofa stack (7800-7806)
# Live placement (2026-04-09): validators 1003/1004, sentries 1503-1510, and RPCs 2102, 2301, 2304, 2400, 2402, 2403 on r630-03;
# RPCs 2201, 2303, 2305-2308, 2401 on r630-02; 2101 + 2103 remain on r630-01 — see ALL_VMIDS_ENDPOINTS.md
# Dev VM (GitOps / Gitea sidecar target): VMID 5700 on r630-04 (verified cluster API 2026-04-17)
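# Example usage (matches deploy-phoenix-deploy-api-to-dev-vm.sh):
#   PVE_HOST="$(get_host_for_vmid 5700)"   # -> ${PROXMOX_HOST_R630_04}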
get_host_for_vmid() {
local vmid="$1"
case "$vmid" in
@@ -138,7 +139,8 @@ get_host_for_vmid() {
10130|10150|10151|106|107|108|10000|10001|10020|10100|10101|10120|10203|10233|10235) echo "${PROXMOX_HOST_R630_01}";;
1000|1001|1002|1500|1501|1502|2101|2103) echo "${PROXMOX_HOST_R630_01}";;
1003|1004|1503|1504|1505|1506|1507|1508|1509|1510|2102|2301|2304|2400|2402|2403) echo "${PROXMOX_HOST_R630_03}";;
5700) echo "${PROXMOX_HOST_R630_04}";;
5000|7810|2201|2303|2305|2306|2307|2308|2401|6200|6201|6202|6203|6204|6205|10234|10237|5800|5801) echo "${PROXMOX_HOST_R630_02}";;
2420|2430|2440|2460|2470|2480) echo "${PROXMOX_HOST_R630_01}";;
5400|5401|5402|5403|5410|5411|5412|5413|5414|5415|5416|5417|5418|5419|5420|5421|5422|5423|5424|5425|5440|5441|5442|5443|5444|5445|5446|5447|5448|5449|5450|5451|5452|5453|5454|5455|5470|5471|5472|5473|5474|5475|5476) echo "${PROXMOX_HOST_R630_02}";;
*) echo "${PROXMOX_HOST_R630_01:-${PROXMOX_HOST_R630_02}}";;

View File

@@ -1,56 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
SOURCE_TARGET_PAIRS=(
".gitea/workflow-sources/deploy-to-phoenix.yml:.gitea/workflows/deploy-to-phoenix.yml"
".gitea/workflow-sources/validate-on-pr.yml:.gitea/workflows/validate-on-pr.yml"
)
REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
if git remote | grep -qx gitea; then
REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
fi
missing_ref=false
for ref in "$REMOTE/main" "$REMOTE/master"; do
if ! git rev-parse --verify "$ref" >/dev/null 2>&1; then
missing_ref=true
fi
done
if [[ "$missing_ref" == true ]]; then
echo "[i] Skipping main/master workflow parity check ($REMOTE/main or $REMOTE/master not available)"
exit 0
fi
for pair in "${SOURCE_TARGET_PAIRS[@]}"; do
source="${pair%%:*}"
target="${pair##*:}"
main_blob="$(git show "$REMOTE/main:$source" 2>/dev/null || true)"
master_blob="$(git show "$REMOTE/master:$source" 2>/dev/null || true)"
if [[ -z "$main_blob" ]]; then
main_blob="$(git show "$REMOTE/main:$target" 2>/dev/null || true)"
fi
if [[ -z "$master_blob" ]]; then
master_blob="$(git show "$REMOTE/master:$target" 2>/dev/null || true)"
fi
if [[ -z "$main_blob" || -z "$master_blob" ]]; then
echo "[✗] Missing $source/$target on $REMOTE/main or $REMOTE/master" >&2
exit 1
fi
if [[ "$main_blob" != "$master_blob" ]]; then
echo "[✗] Branch workflow drift: $source differs between $REMOTE/main and $REMOTE/master" >&2
echo " Keep both deploy branches in lockstep for workflow-source files." >&2
exit 1
fi
echo "[✓] Branch parity OK for $source"
done

View File

@@ -1,32 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
check_one() {
local source_rel="$1"
local target_rel="$2"
if [[ ! -f "$source_rel" ]]; then
echo "[✗] Missing workflow source: $source_rel" >&2
return 1
fi
if [[ ! -f "$target_rel" ]]; then
echo "[✗] Missing generated workflow: $target_rel" >&2
return 1
fi
if ! diff -u "$source_rel" "$target_rel" >/dev/null; then
echo "[✗] Workflow drift detected: $target_rel does not match $source_rel" >&2
echo " Run: bash scripts/verify/sync-gitea-workflows.sh" >&2
return 1
fi
echo "[✓] $target_rel matches $source_rel"
}
check_one ".gitea/workflow-sources/deploy-to-phoenix.yml" ".gitea/workflows/deploy-to-phoenix.yml"
check_one ".gitea/workflow-sources/validate-on-pr.yml" ".gitea/workflows/validate-on-pr.yml"

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env bash
# Every path listed under "packages:" in pnpm-workspace.yaml must have a matching
# importer entry in pnpm-lock.yaml. If one is missing, pnpm can fail in confusing
# ways (e.g. pnpm outdated -r: Cannot read ... 'optionalDependencies').
# Usage: bash scripts/verify/check-pnpm-workspace-lockfile.sh
# Exit: 0 if check passes or pnpm is not used; 1 on mismatch.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
WS="${PROJECT_ROOT}/pnpm-workspace.yaml"
LOCK="${PROJECT_ROOT}/pnpm-lock.yaml"
if [[ ! -f "$WS" ]] || [[ ! -f "$LOCK" ]]; then
echo " (skip: pnpm-workspace.yaml or pnpm-lock.yaml not present at repo root)"
exit 0
fi
# Paths under the top-level `packages:` block only (stops at next top-level key)
mapfile -t _paths < <(awk '
/^packages:/ { p=1; next }
p && /^[a-zA-Z]/ && $0 !~ /^packages/ { exit }
p && /^[[:space:]]*-[[:space:]]/ {
sub(/^[[:space:]]*-[[:space:]]+/, "")
sub(/[[:space:]]*#.*/, "")
gsub(/[[:space:]]+$/, "")
if (length) print
}
' "$WS")
missing=()
for relp in "${_paths[@]}"; do
if [[ -z "$relp" ]]; then
continue
fi
if ! grep -qFx " ${relp}:" "$LOCK"; then
missing+=("$relp")
fi
done
if [[ ${#missing[@]} -gt 0 ]]; then
echo "✗ pnpm lockfile is missing importer(s) for these workspace path(s):"
printf ' %q\n' "${missing[@]}"
echo " Run: pnpm install (at repo root) to refresh pnpm-lock.yaml"
exit 1
fi
echo " pnpm workspace / lockfile importers aligned (${#_paths[@]} path(s))."
exit 0

View File

@@ -3,7 +3,6 @@
# Use for CI or pre-deploy: dependencies, config files, optional genesis.
# Usage: bash scripts/verify/run-all-validation.sh [--skip-genesis]
# --skip-genesis: do not run validate-genesis.sh (default: run if smom-dbis-138 present).
# Steps: dependencies, config files, cW* mesh matrix (if pair-discovery JSON exists), genesis.
set -euo pipefail
@@ -25,64 +24,15 @@ bash "$SCRIPT_DIR/check-dependencies.sh" || log_err "check-dependencies failed"
log_ok "Dependencies OK"
echo ""
echo "1b. pnpm workspace vs lockfile..."
if [[ -f "$PROJECT_ROOT/pnpm-workspace.yaml" ]]; then
bash "$SCRIPT_DIR/check-pnpm-workspace-lockfile.sh" || log_err "pnpm lockfile / workspace drift"
log_ok "pnpm lockfile aligned with workspace"
else
echo " (no pnpm-workspace.yaml at root — skip)"
fi
echo ""
echo "1c. Gitea workflow source sync..."
bash "$SCRIPT_DIR/check-gitea-workflows.sh" || log_err "Gitea workflow source drift"
log_ok "Gitea workflows match source-of-truth files"
echo ""
echo "1d. main/master workflow parity..."
bash "$SCRIPT_DIR/check-gitea-branch-workflow-parity.sh" || log_err "main/master workflow parity drift"
log_ok "main/master workflow parity OK"
echo ""
echo "2. Config files..."
bash "$SCRIPT_DIR/../validation/validate-config-files.sh" || log_err "validate-config-files failed"
log_ok "Config validation OK"
echo ""
echo "3. cW* mesh matrix (deployment-status + Uni V2 pair-discovery)..."
DISCOVERY_JSON="$PROJECT_ROOT/reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json"
if [[ -f "$DISCOVERY_JSON" ]]; then
MATRIX_JSON="$PROJECT_ROOT/reports/status/cw-mesh-deployment-matrix-latest.json"
bash "$SCRIPT_DIR/build-cw-mesh-deployment-matrix.sh" --no-markdown --json-out "$MATRIX_JSON" || log_err "cw mesh matrix merge failed"
log_ok "cW mesh matrix OK (also wrote $MATRIX_JSON)"
else
echo " ($DISCOVERY_JSON missing — run: bash scripts/verify/build-promod-uniswap-v2-live-pair-discovery.sh)"
fi
echo ""
echo "3b. deployment-status graph (cross-chain-pmm-lps)..."
PMM_VALIDATE="$PROJECT_ROOT/cross-chain-pmm-lps/scripts/validate-deployment-status.cjs"
if [[ -f "$PMM_VALIDATE" ]] && command -v node &>/dev/null; then
node "$PMM_VALIDATE" || log_err "validate-deployment-status.cjs failed"
log_ok "deployment-status.json rules OK"
else
echo " (skip: node or $PMM_VALIDATE missing)"
fi
echo ""
echo "3c. External dependency blockers..."
EXT_CHECK="$SCRIPT_DIR/check-external-dependencies.sh"
if [[ -x "$EXT_CHECK" ]]; then
bash "$EXT_CHECK" --advisory || true
else
echo " (skip: $EXT_CHECK missing)"
fi
echo ""
if [[ "$SKIP_GENESIS" == true ]]; then
echo "4. Genesis — skipped (--skip-genesis)"
echo "3. Genesis — skipped (--skip-genesis)"
else
echo "4. Genesis (smom-dbis-138)..."
echo "3. Genesis (smom-dbis-138)..."
GENESIS_SCRIPT="$PROJECT_ROOT/smom-dbis-138/scripts/validation/validate-genesis.sh"
if [[ -x "$GENESIS_SCRIPT" ]]; then
bash "$GENESIS_SCRIPT" || log_err "validate-genesis failed"

View File

@@ -1,18 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
sync_one() {
local source_rel="$1"
local target_rel="$2"
mkdir -p "$(dirname "$target_rel")"
cp "$source_rel" "$target_rel"
echo "[✓] Synced $target_rel from $source_rel"
}
sync_one ".gitea/workflow-sources/deploy-to-phoenix.yml" ".gitea/workflows/deploy-to-phoenix.yml"
sync_one ".gitea/workflow-sources/validate-on-pr.yml" ".gitea/workflows/validate-on-pr.yml"

View File

@@ -0,0 +1,417 @@
#!/usr/bin/env bash
set -euo pipefail
# Verify the deployed Chain 138 cross-chain flash infrastructure on Blockscout
# using the exact Foundry deployment lineage from
# DeployCrossChainFlashInfrastructure.s.sol.
#
# Usage:
# bash scripts/verify/verify-chain138-flash-infra-blockscout.sh
# bash scripts/verify/verify-chain138-flash-infra-blockscout.sh --status-only
# bash scripts/verify/verify-chain138-flash-infra-blockscout.sh --only UniversalCCIPFlashBridgeAdapter
# bash scripts/verify/verify-chain138-flash-infra-blockscout.sh --force-submit
#
# Notes:
# - By default this script refuses to submit when the current local Foundry
# runtime artifact does not match the deployed Chain 138 runtime bytecode.
# - Use --force-submit only after you intentionally decide to test a candidate
# historical source/build lineage anyway.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM_SOURCE_ROOT="${CHAIN138_VERIFY_SOURCE_ROOT:-${PROJECT_ROOT}/smom-dbis-138}"
SMOM_BROADCAST_ROOT="${CHAIN138_VERIFY_BROADCAST_ROOT:-${PROJECT_ROOT}/smom-dbis-138}"
FLASH_BROADCAST="${SMOM_BROADCAST_ROOT}/broadcast/DeployCrossChainFlashInfrastructure.s.sol/138/run-latest.json"
if [[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]]; then
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
fi
command -v forge >/dev/null 2>&1 || { echo "ERROR: forge not found"; exit 1; }
command -v node >/dev/null 2>&1 || { echo "ERROR: node not found"; exit 1; }
command -v cast >/dev/null 2>&1 || { echo "ERROR: cast not found"; exit 1; }
command -v jq >/dev/null 2>&1 || { echo "ERROR: jq not found"; exit 1; }
command -v curl >/dev/null 2>&1 || { echo "ERROR: curl not found"; exit 1; }
[[ -f "${FLASH_BROADCAST}" ]] || { echo "ERROR: missing broadcast ${FLASH_BROADCAST}"; exit 1; }
RPC_URL="${RPC_URL_138:-${CHAIN138_RPC_URL:-http://192.168.11.211:8545}}"
BLOCKSCOUT_URL="${CHAIN138_BLOCKSCOUT_INTERNAL_URL:-http://${IP_BLOCKSCOUT:-192.168.11.140}:4000}"
BLOCKSCOUT_API_BASE="${CHAIN138_BLOCKSCOUT_API_BASE:-${BLOCKSCOUT_URL}/api/v2}"
BLOCKSCOUT_PUBLIC_API_BASE="${CHAIN138_BLOCKSCOUT_PUBLIC_API_BASE:-https://explorer.d-bis.org/api/v2}"
VERIFIER_PORT="${FORGE_VERIFIER_PROXY_PORT:-3080}"
FORGE_VERIFIER_URL="${FORGE_VERIFIER_URL:-http://127.0.0.1:${VERIFIER_PORT}/api}"
WAIT_ATTEMPTS="${CHAIN138_FLASH_VERIFY_WAIT_ATTEMPTS:-18}"
WAIT_SECONDS="${CHAIN138_FLASH_VERIFY_WAIT_SECONDS:-5}"
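# Example (illustrative): allow a longer verification poll window on a slow indexer:
#   CHAIN138_FLASH_VERIFY_WAIT_ATTEMPTS=36 CHAIN138_FLASH_VERIFY_WAIT_SECONDS=10 \
#     bash scripts/verify/verify-chain138-flash-infra-blockscout.sh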
ONLY_LIST=""
STATUS_ONLY=0
NO_WAIT=0
FORCE_SUBMIT=0
PROXY_PID=""
while [[ $# -gt 0 ]]; do
case "$1" in
--only) ONLY_LIST="${2:-}"; shift 2 ;;
--status-only) STATUS_ONLY=1; shift ;;
--no-wait) NO_WAIT=1; shift ;;
--force-submit) FORCE_SUBMIT=1; shift ;;
*)
echo "Unknown argument: $1" >&2
exit 1
;;
esac
done
cleanup_proxy() {
[[ -n "${PROXY_PID:-}" ]] && kill "${PROXY_PID}" 2>/dev/null || true
}
trap cleanup_proxy EXIT
log() { printf '%s\n' "$*"; }
ok() { printf '[ok] %s\n' "$*"; }
warn() { printf '[warn] %s\n' "$*" >&2; }
fail() { printf '[fail] %s\n' "$*" >&2; exit 1; }
should_handle() {
local name="$1"
[[ -n "${ONLY_LIST}" ]] && [[ ",${ONLY_LIST}," != *",${name},"* ]] && return 1
return 0
}
proxy_listening() {
if command -v nc >/dev/null 2>&1; then
nc -z -w 2 127.0.0.1 "${VERIFIER_PORT}" 2>/dev/null
else
timeout 2 bash -c "echo >/dev/tcp/127.0.0.1/${VERIFIER_PORT}" 2>/dev/null
fi
}
start_proxy_if_needed() {
if proxy_listening; then
ok "Forge verification proxy already listening on ${VERIFIER_PORT}."
return 0
fi
log "Starting forge verification proxy on ${VERIFIER_PORT} -> ${BLOCKSCOUT_URL}"
PORT="${VERIFIER_PORT}" BLOCKSCOUT_URL="${BLOCKSCOUT_URL}" node "${PROJECT_ROOT}/forge-verification-proxy/server.js" >/tmp/chain138-flash-infra-blockscout-proxy.log 2>&1 &
PROXY_PID=$!
sleep 2
proxy_listening || fail "Forge verification proxy failed to start. See /tmp/chain138-flash-infra-blockscout-proxy.log"
}
verification_status_json() {
local addr="$1"
local raw
local base
for base in "${BLOCKSCOUT_API_BASE}" "${BLOCKSCOUT_PUBLIC_API_BASE}"; do
raw="$(curl --max-time 20 -fsS "${base}/smart-contracts/${addr}" 2>/dev/null || true)"
if [[ -n "${raw}" ]] && jq -e 'type == "object"' >/dev/null 2>&1 <<<"${raw}"; then
printf '%s' "${raw}"
return 0
fi
done
return 1
}
is_verified() {
local addr="$1"
local expected_name="$2"
local json name compiler
json="$(verification_status_json "${addr}")" || return 1
name="$(jq -r '.name // empty' <<<"${json}")"
compiler="$(jq -r '.compiler_version // empty' <<<"${json}")"
[[ -n "${name}" && -n "${compiler}" && "${name}" == "${expected_name}" ]]
}
wait_for_verification() {
local label="$1"
local addr="$2"
local expected_name="$3"
local attempt json name compiler
for (( attempt=1; attempt<=WAIT_ATTEMPTS; attempt++ )); do
json="$(verification_status_json "${addr}")" || json=""
name="$(jq -r '.name // empty' <<<"${json}" 2>/dev/null || true)"
compiler="$(jq -r '.compiler_version // empty' <<<"${json}" 2>/dev/null || true)"
if [[ -n "${name}" && -n "${compiler}" && "${name}" == "${expected_name}" ]]; then
ok "${label} verified on Blockscout as ${name} (${compiler})."
return 0
fi
sleep "${WAIT_SECONDS}"
done
return 1
}
broadcast_commit() {
jq -r '.commit' "${FLASH_BROADCAST}"
}
broadcast_timestamp() {
jq -r '.timestamp' "${FLASH_BROADCAST}"
}
broadcast_address() {
local name="$1"
jq -r --arg name "${name}" '.transactions[] | select(.transactionType=="CREATE" and .contractName==$name) | .contractAddress' "${FLASH_BROADCAST}" | head -n1
}
broadcast_tx_hash() {
local name="$1"
jq -r --arg name "${name}" '.transactions[] | select(.transactionType=="CREATE" and .contractName==$name) | .hash' "${FLASH_BROADCAST}" | head -n1
}
broadcast_arg() {
local name="$1"
local index="$2"
jq -r --arg name "${name}" --argjson index "${index}" '.transactions[] | select(.transactionType=="CREATE" and .contractName==$name) | .arguments[$index]' "${FLASH_BROADCAST}" | head -n1
}
contract_path() {
case "$1" in
UniversalCCIPFlashBridgeAdapter) printf '%s' 'contracts/flash/UniversalCCIPFlashBridgeAdapter.sol:UniversalCCIPFlashBridgeAdapter' ;;
CrossChainFlashRepayReceiver) printf '%s' 'contracts/flash/CrossChainFlashRepayReceiver.sol:CrossChainFlashRepayReceiver' ;;
CrossChainFlashVaultCreditReceiver) printf '%s' 'contracts/flash/CrossChainFlashVaultCreditReceiver.sol:CrossChainFlashVaultCreditReceiver' ;;
*) return 1 ;;
esac
}
artifact_json_path() {
case "$1" in
UniversalCCIPFlashBridgeAdapter)
if [[ -f "${SMOM_SOURCE_ROOT}/out/scopes/flash/UniversalCCIPFlashBridgeAdapter.sol/UniversalCCIPFlashBridgeAdapter.json" ]]; then
printf '%s' "${SMOM_SOURCE_ROOT}/out/scopes/flash/UniversalCCIPFlashBridgeAdapter.sol/UniversalCCIPFlashBridgeAdapter.json"
else
printf '%s' "${SMOM_SOURCE_ROOT}/out/UniversalCCIPFlashBridgeAdapter.sol/UniversalCCIPFlashBridgeAdapter.json"
fi
;;
CrossChainFlashRepayReceiver)
if [[ -f "${SMOM_SOURCE_ROOT}/out/scopes/flash/CrossChainFlashRepayReceiver.sol/CrossChainFlashRepayReceiver.json" ]]; then
printf '%s' "${SMOM_SOURCE_ROOT}/out/scopes/flash/CrossChainFlashRepayReceiver.sol/CrossChainFlashRepayReceiver.json"
else
printf '%s' "${SMOM_SOURCE_ROOT}/out/CrossChainFlashRepayReceiver.sol/CrossChainFlashRepayReceiver.json"
fi
;;
CrossChainFlashVaultCreditReceiver)
if [[ -f "${SMOM_SOURCE_ROOT}/out/scopes/flash/CrossChainFlashVaultCreditReceiver.sol/CrossChainFlashVaultCreditReceiver.json" ]]; then
printf '%s' "${SMOM_SOURCE_ROOT}/out/scopes/flash/CrossChainFlashVaultCreditReceiver.sol/CrossChainFlashVaultCreditReceiver.json"
else
printf '%s' "${SMOM_SOURCE_ROOT}/out/CrossChainFlashVaultCreditReceiver.sol/CrossChainFlashVaultCreditReceiver.json"
fi
;;
*) return 1 ;;
esac
}
constructor_signature() {
printf '%s' 'constructor(address)'
}
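# Since every flash contract here takes constructor(address), the encoded args are a
# single left-padded word, e.g. (illustrative address):
#   cast abi-encode 'constructor(address)' 0x1111111111111111111111111111111111111111
#   -> 0x0000000000000000000000001111111111111111111111111111111111111111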
has_contract_bytecode() {
local addr="$1"
local code
code="$(cast code "${addr}" --rpc-url "${RPC_URL}" 2>/dev/null | tr -d '\n\r \t' | tr '[:upper:]' '[:lower:]')" || true
[[ -n "${code}" && "${code}" != "0x" && "${code}" != "0x0" ]]
}
creation_input_report() {
local label="$1"
local constructor_sig="$2"
local constructor_arg="$3"
local artifact_json artifact_bytecode tx_input encoded candidate candidate_keccak tx_keccak
artifact_json="$(artifact_json_path "${label}")" || return 2
[[ -f "${artifact_json}" ]] || return 2
artifact_bytecode="$(jq -r '.bytecode.object // empty' "${artifact_json}" | tr '[:upper:]' '[:lower:]')"
tx_input="$(jq -r --arg name "${label}" '.transactions[] | select(.transactionType=="CREATE" and .contractName==$name) | .transaction.input' "${FLASH_BROADCAST}" | head -n1 | tr '[:upper:]' '[:lower:]')"
[[ -n "${artifact_bytecode}" && -n "${tx_input}" ]] || return 2
encoded="$(cast abi-encode "${constructor_sig}" "${constructor_arg}")"
candidate="${artifact_bytecode}${encoded#0x}"
candidate_keccak="$(cast keccak "${candidate}")"
tx_keccak="$(cast keccak "${tx_input}")"
if [[ "${candidate}" == "${tx_input}" ]]; then
ok "${label}: recovered exact historical creation bytecode/input (${candidate_keccak})."
return 0
fi
warn "${label}: creation bytecode/input does not match recorded deployment transaction."
warn "${label}: candidate_keccak=${candidate_keccak} tx_input_keccak=${tx_keccak}"
return 1
}
runtime_hash_report() {
local label="$1"
local addr="$2"
local constructor_sig="$3"
local constructor_arg="$4"
local artifact_json artifact_runtime chain_runtime artifact_keccak chain_keccak
local immutable_count
artifact_json="$(artifact_json_path "${label}")" || return 2
[[ -f "${artifact_json}" ]] || return 2
immutable_count="$(jq -r '(.deployedBytecode.immutableReferences // {}) | length' "${artifact_json}")"
if [[ "${immutable_count}" != "0" ]]; then
creation_input_report "${label}" "${constructor_sig}" "${constructor_arg}"
return $?
fi
artifact_runtime="$(jq -r '.deployedBytecode.object // empty' "${artifact_json}" | tr '[:upper:]' '[:lower:]')"
chain_runtime="$(cast code "${addr}" --rpc-url "${RPC_URL}" 2>/dev/null | tr -d '\n\r \t' | tr '[:upper:]' '[:lower:]')" || true
[[ -n "${artifact_runtime}" && -n "${chain_runtime}" && "${chain_runtime}" != "0x" ]] || return 2
artifact_keccak="$(cast keccak "${artifact_runtime}")"
chain_keccak="$(cast keccak "${chain_runtime}")"
if [[ "${artifact_runtime}" != "${chain_runtime}" ]]; then
warn "${label}: Foundry artifact runtime bytecode does not match deployed bytecode."
warn "${label}: artifact_keccak=${artifact_keccak} chain_keccak=${chain_keccak}"
return 1
fi
ok "${label}: current Foundry runtime bytecode matches deployed bytecode (${artifact_keccak})."
return 0
}
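# Return-code contract relied on by submit_standard_input_from_forge: 0 = local
# artifact matches the chain, 1 = definite mismatch (submission is skipped unless
# --force-submit), 2 = undetermined (missing artifact or empty code), not blocking.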
submit_standard_input_from_forge() {
local label="$1"
local addr="$2"
local path="$3"
local constructor_args="$4"
local input_file response message
local compiler_version evm_version optimization_runs via_ir_flag artifact_json mismatch_rc
artifact_json="$(artifact_json_path "${label}")" || fail "${label}: missing artifact mapping"
[[ -f "${artifact_json}" ]] || fail "${label}: missing artifact ${artifact_json}"
if runtime_hash_report "${label}" "${addr}" "$(constructor_signature)" "$(broadcast_arg "${label}" 0)"; then
mismatch_rc=0
else
mismatch_rc=$?
fi
if (( mismatch_rc == 1 && FORCE_SUBMIT == 0 )); then
warn "${label}: skipping submission because the current local artifact does not match deployed runtime bytecode."
warn "${label}: recover the exact historical source/build lineage first, then rerun with --force-submit if you intentionally want to test a candidate."
return 0
fi
compiler_version="v$(jq -r '.metadata.compiler.version // empty' "${artifact_json}")"
evm_version="$(jq -r '.metadata.settings.evmVersion // "default"' "${artifact_json}")"
optimization_runs="$(jq -r '.metadata.settings.optimizer.runs // 200' "${artifact_json}")"
via_ir_flag=(--via-ir)
if [[ "$(jq -r '.metadata.settings.viaIR // false' "${artifact_json}")" != "true" ]]; then
via_ir_flag=()
fi
input_file="$(mktemp)"
(
cd "${SMOM_SOURCE_ROOT}"
forge verify-contract "${addr}" "${path}" \
--chain-id 138 \
--root . \
--compiler-version "${compiler_version}" \
--num-of-optimizations "${optimization_runs}" \
"${via_ir_flag[@]}" \
--evm-version "${evm_version}" \
--show-standard-json-input >"${input_file}"
) || {
rm -f "${input_file}"
fail "${label}: failed to render Foundry standard-input from deployment sources."
}
response="$(
curl --max-time 180 -fsS -X POST \
-F "compiler_version=${compiler_version}" \
-F "contract_name=${path}" \
-F "autodetect_constructor_args=false" \
-F "constructor_args=${constructor_args}" \
-F "optimization_runs=${optimization_runs}" \
-F "is_optimization_enabled=true" \
-F "evm_version=${evm_version}" \
-F "license_type=mit" \
-F "files[0]=@${input_file};type=application/json" \
"${BLOCKSCOUT_URL}/api/v2/smart-contracts/${addr}/verification/via/standard-input"
)" || {
rm -f "${input_file}"
fail "${label}: Blockscout Foundry standard-input submission failed."
}
rm -f "${input_file}"
message="$(jq -r '.message // empty' <<<"${response}")"
if [[ "${message}" == "Smart-contract verification started" ]]; then
ok "${label} Foundry standard-input verification submission accepted."
return 0
fi
warn "${label} Foundry standard-input verification returned: ${response}"
return 1
}
submit_best_verification() {
local label="$1"
local addr="$2"
local path="$3"
local expected_name="$4"
local encoded
has_contract_bytecode "${addr}" || fail "${label} has no bytecode at ${addr}"
if is_verified "${addr}" "${expected_name}"; then
ok "${label} already verified on Blockscout."
return 0
fi
encoded="$(cast abi-encode "$(constructor_signature)" "$(broadcast_arg "${label}" 0)")"
submit_standard_input_from_forge "${label}" "${addr}" "${path}" "${encoded}"
}
log "Chain 138 flash-infra Blockscout verification"
log "RPC: ${RPC_URL}"
log "Explorer API: ${BLOCKSCOUT_API_BASE}"
log "Flash broadcast commit: $(broadcast_commit)"
log "Flash broadcast timestamp: $(broadcast_timestamp)"
log
if (( STATUS_ONLY )); then
for label in UniversalCCIPFlashBridgeAdapter CrossChainFlashRepayReceiver CrossChainFlashVaultCreditReceiver; do
should_handle "${label}" || continue
addr="$(broadcast_address "${label}")"
if is_verified "${addr}" "${label}"; then
ok "${label} already verified on Blockscout."
else
warn "${label} not yet verified on Blockscout."
fi
runtime_hash_report "${label}" "${addr}" "$(constructor_signature)" "$(broadcast_arg "${label}" 0)" || true
done
exit 0
fi
for label in UniversalCCIPFlashBridgeAdapter CrossChainFlashRepayReceiver CrossChainFlashVaultCreditReceiver; do
should_handle "${label}" || continue
addr="$(broadcast_address "${label}")"
tx_hash="$(broadcast_tx_hash "${label}")"
log "Processing ${label} at ${addr} (create tx ${tx_hash})"
submit_best_verification "${label}" "${addr}" "$(contract_path "${label}")" "${label}"
done
if (( NO_WAIT )); then
ok "Submission pass complete; skipping wait/poll because --no-wait was passed."
exit 0
fi
for label in UniversalCCIPFlashBridgeAdapter CrossChainFlashRepayReceiver CrossChainFlashVaultCreditReceiver; do
should_handle "${label}" || continue
addr="$(broadcast_address "${label}")"
if is_verified "${addr}" "${label}"; then
ok "${label} already verified on Blockscout."
continue
fi
if ! wait_for_verification "${label}" "${addr}" "${label}"; then
warn "${label} did not materialize as fully verified within the wait window."
fi
done

View File

@@ -0,0 +1,588 @@
#!/usr/bin/env bash
set -euo pipefail
# Verify the deployed Chain 138 route execution stack and pilot venue contracts on Blockscout.
#
# Usage:
# bash scripts/verify/verify-chain138-route-execution-stack-blockscout.sh
# bash scripts/verify/verify-chain138-route-execution-stack-blockscout.sh --status-only
# bash scripts/verify/verify-chain138-route-execution-stack-blockscout.sh --no-wait
# bash scripts/verify/verify-chain138-route-execution-stack-blockscout.sh --only EnhancedSwapRouterV2,Chain138PilotCurve3Pool
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM_SOURCE_ROOT="${CHAIN138_VERIFY_SOURCE_ROOT:-${PROJECT_ROOT}/smom-dbis-138}"
SMOM_BROADCAST_ROOT="${CHAIN138_VERIFY_BROADCAST_ROOT:-${PROJECT_ROOT}/smom-dbis-138}"
if [[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]]; then
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
fi
command -v forge >/dev/null 2>&1 || { echo "ERROR: forge not found"; exit 1; }
command -v node >/dev/null 2>&1 || { echo "ERROR: node not found"; exit 1; }
command -v cast >/dev/null 2>&1 || { echo "ERROR: cast not found"; exit 1; }
command -v jq >/dev/null 2>&1 || { echo "ERROR: jq not found"; exit 1; }
command -v curl >/dev/null 2>&1 || { echo "ERROR: curl not found"; exit 1; }
RPC_URL="${RPC_URL_138:-${CHAIN138_RPC_URL:-http://192.168.11.211:8545}}"
BLOCKSCOUT_URL="${CHAIN138_BLOCKSCOUT_INTERNAL_URL:-http://${IP_BLOCKSCOUT:-192.168.11.140}:4000}"
BLOCKSCOUT_API_BASE="${CHAIN138_BLOCKSCOUT_API_BASE:-${BLOCKSCOUT_URL}/api/v2}"
BLOCKSCOUT_PUBLIC_API_BASE="${CHAIN138_BLOCKSCOUT_PUBLIC_API_BASE:-https://explorer.d-bis.org/api/v2}"
ROUTE_BROADCAST="${SMOM_BROADCAST_ROOT}/broadcast/DeployEnhancedSwapRouterV2.s.sol/138/run-latest.json"
VERIFIER_PORT="${FORGE_VERIFIER_PROXY_PORT:-3080}"
FORGE_VERIFIER_URL="${FORGE_VERIFIER_URL:-http://127.0.0.1:${VERIFIER_PORT}/api}"
ROUTE_STACK_SOLC_VERSION="${ROUTE_STACK_SOLC_VERSION:-v0.8.20+commit.a1b79de6}"
ROUTE_STACK_EVM_VERSION="${ROUTE_STACK_EVM_VERSION:-shanghai}"
ROUTE_STACK_OPT_RUNS="${ROUTE_STACK_OPT_RUNS:-200}"
ONLY_LIST=""
STATUS_ONLY=0
NO_WAIT=0
PROXY_PID=""
while [[ $# -gt 0 ]]; do
case "$1" in
--only) ONLY_LIST="${2:-}"; shift 2 ;;
--status-only) STATUS_ONLY=1; shift ;;
--no-wait) NO_WAIT=1; shift ;;
*)
echo "Unknown argument: $1" >&2
exit 1
;;
esac
done
cleanup_proxy() {
[[ -n "${PROXY_PID:-}" ]] && kill "${PROXY_PID}" 2>/dev/null || true
}
trap cleanup_proxy EXIT
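# Honor the optional --only filter: succeed when the label is selected (or no
# filter was given), so callers can guard with `should_handle "$label" || continue`.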
should_handle() {
local name="$1"
[[ -n "${ONLY_LIST}" ]] && [[ ",${ONLY_LIST}," != *",${name},"* ]] && return 1
return 0
}
log() { printf '%s\n' "$*"; }
ok() { printf '[ok] %s\n' "$*"; }
warn() { printf '[warn] %s\n' "$*" >&2; }
fail() { printf '[fail] %s\n' "$*" >&2; exit 1; }
proxy_listening() {
if command -v nc >/dev/null 2>&1; then
nc -z -w 2 127.0.0.1 "${VERIFIER_PORT}" 2>/dev/null
else
timeout 2 bash -c "echo >/dev/tcp/127.0.0.1/${VERIFIER_PORT}" 2>/dev/null
fi
}
start_proxy_if_needed() {
if proxy_listening; then
ok "Forge verification proxy already listening on ${VERIFIER_PORT}."
return 0
fi
log "Starting forge verification proxy on ${VERIFIER_PORT} -> ${BLOCKSCOUT_URL}"
PORT="${VERIFIER_PORT}" BLOCKSCOUT_URL="${BLOCKSCOUT_URL}" node "${PROJECT_ROOT}/forge-verification-proxy/server.js" >/tmp/chain138-route-execution-blockscout-proxy.log 2>&1 &
PROXY_PID=$!
sleep 2
proxy_listening || fail "Forge verification proxy failed to start. See /tmp/chain138-route-execution-blockscout-proxy.log"
}
has_contract_bytecode() {
local addr="$1"
local code
code="$(cast code "${addr}" --rpc-url "${RPC_URL}" 2>/dev/null | tr -d '\n\r \t' | tr '[:upper:]' '[:lower:]')" || true
[[ -n "${code}" && "${code}" != "0x" && "${code}" != "0x0" ]]
}
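# Fetch the smart-contract record for an address, preferring the internal
# Blockscout API and falling back to the public explorer API when the
# internal endpoint is unreachable or returns a non-object payload.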
verification_status_json() {
local addr="$1"
local raw
local base
for base in "${BLOCKSCOUT_API_BASE}" "${BLOCKSCOUT_PUBLIC_API_BASE}"; do
raw="$(curl --max-time 20 -fsS "${base}/smart-contracts/${addr}" 2>/dev/null || true)"
if [[ -n "${raw}" ]] && jq -e 'type == "object"' >/dev/null 2>&1 <<<"${raw}"; then
printf '%s' "${raw}"
return 0
fi
done
return 1
}
is_verified() {
local addr="$1"
local expected_name="$2"
local json name compiler
json="$(verification_status_json "${addr}")" || return 1
name="$(jq -r '.name // empty' <<<"${json}")"
compiler="$(jq -r '.compiler_version // empty' <<<"${json}")"
[[ -n "${name}" && -n "${compiler}" && "${name}" == "${expected_name}" ]]
}
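# Legacy flattened-source verification path: route `forge verify-contract
# --flatten` through the local verification proxy. Kept only as a last resort
# when neither standard-input path applies.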
submit_verification() {
local label="$1"
local addr="$2"
local path="$3"
local expected_name="$4"
local constructor_sig="$5"
shift 5
local constructor_args=("$@")
start_proxy_if_needed
has_contract_bytecode "${addr}" || fail "${label} has no bytecode at ${addr}"
if is_verified "${addr}" "${expected_name}"; then
ok "${label} already verified on Blockscout."
return 0
fi
local cmd=(forge verify-contract "${addr}" "${path}" --chain-id 138 --verifier blockscout --verifier-url "${FORGE_VERIFIER_URL}" --rpc-url "${RPC_URL}" --flatten)
if [[ -n "${constructor_sig}" ]]; then
local encoded
encoded="$(cast abi-encode "${constructor_sig}" "${constructor_args[@]}")"
cmd+=(--constructor-args "${encoded}")
fi
log "Submitting Blockscout verification for ${label} (${addr})"
if (cd "${SMOM_SOURCE_ROOT}" && "${cmd[@]}" 2>&1); then
ok "${label} verification submission accepted."
else
warn "${label} verification submission did not complete cleanly. Check Blockscout manually."
fi
}
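# Two artifact lineages exist for these contracts: Hardhat debug artifacts
# (artifacts/**/*.dbg.json, each pointing at a build-info file) and Foundry
# artifacts (out/<File>.sol/<Contract>.json). The mappings below resolve both
# per label.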
artifact_dbg_path() {
case "$1" in
EnhancedSwapRouterV2) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/EnhancedSwapRouterV2.sol/EnhancedSwapRouterV2.dbg.json" ;;
IntentBridgeCoordinatorV2) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/IntentBridgeCoordinatorV2.sol/IntentBridgeCoordinatorV2.dbg.json" ;;
DodoRouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/adapters/DodoRouteExecutorAdapter.sol/DodoRouteExecutorAdapter.dbg.json" ;;
DodoV3RouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/adapters/DodoV3RouteExecutorAdapter.sol/DodoV3RouteExecutorAdapter.dbg.json" ;;
UniswapV3RouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/adapters/UniswapV3RouteExecutorAdapter.sol/UniswapV3RouteExecutorAdapter.dbg.json" ;;
BalancerRouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/adapters/BalancerRouteExecutorAdapter.sol/BalancerRouteExecutorAdapter.dbg.json" ;;
CurveRouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/adapters/CurveRouteExecutorAdapter.sol/CurveRouteExecutorAdapter.dbg.json" ;;
OneInchRouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/adapters/OneInchRouteExecutorAdapter.sol/OneInchRouteExecutorAdapter.dbg.json" ;;
Chain138PilotUniswapV3Router) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol/Chain138PilotUniswapV3Router.dbg.json" ;;
Chain138PilotBalancerVault) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol/Chain138PilotBalancerVault.dbg.json" ;;
Chain138PilotCurve3Pool) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol/Chain138PilotCurve3Pool.dbg.json" ;;
Chain138PilotOneInchAggregationRouter) printf '%s' "${SMOM_SOURCE_ROOT}/artifacts/contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol/Chain138PilotOneInchAggregationRouter.dbg.json" ;;
*) return 1 ;;
esac
}
foundry_artifact_json_path() {
case "$1" in
EnhancedSwapRouterV2) printf '%s' "${SMOM_SOURCE_ROOT}/out/EnhancedSwapRouterV2.sol/EnhancedSwapRouterV2.json" ;;
IntentBridgeCoordinatorV2) printf '%s' "${SMOM_SOURCE_ROOT}/out/IntentBridgeCoordinatorV2.sol/IntentBridgeCoordinatorV2.json" ;;
DodoRouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/out/DodoRouteExecutorAdapter.sol/DodoRouteExecutorAdapter.json" ;;
DodoV3RouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/out/DodoV3RouteExecutorAdapter.sol/DodoV3RouteExecutorAdapter.json" ;;
UniswapV3RouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/out/UniswapV3RouteExecutorAdapter.sol/UniswapV3RouteExecutorAdapter.json" ;;
BalancerRouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/out/BalancerRouteExecutorAdapter.sol/BalancerRouteExecutorAdapter.json" ;;
CurveRouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/out/CurveRouteExecutorAdapter.sol/CurveRouteExecutorAdapter.json" ;;
OneInchRouteExecutorAdapter) printf '%s' "${SMOM_SOURCE_ROOT}/out/OneInchRouteExecutorAdapter.sol/OneInchRouteExecutorAdapter.json" ;;
Chain138PilotUniswapV3Router|Chain138PilotBalancerVault|Chain138PilotCurve3Pool|Chain138PilotOneInchAggregationRouter)
printf '%s' "${SMOM_SOURCE_ROOT}/out/Chain138PilotDexVenues.sol/${1}.json"
;;
*) return 1 ;;
esac
}
route_broadcast_input() {
local label="$1"
[[ -f "${ROUTE_BROADCAST}" ]] || return 1
jq -r --arg name "${label}" '.transactions[] | select(.transactionType=="CREATE" and .contractName==$name) | .transaction.input' "${ROUTE_BROADCAST}" | head -n1
}
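# Compare the local build output against what is actually on chain. Contracts
# with immutable references cannot be compared on runtime bytecode (immutables
# are patched in at construction), so for those we reconstruct the creation
# input (creation bytecode + ABI-encoded constructor args) and compare it
# against the recorded broadcast transaction input; everything else is
# compared byte-for-byte on runtime bytecode.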
runtime_hash_report() {
local label="$1"
local addr="$2"
local constructor_sig="$3"
shift 3
local constructor_args=("$@")
local artifact_json artifact_runtime chain_runtime
artifact_json="$(foundry_artifact_json_path "${label}")" || return 0
[[ -f "${artifact_json}" ]] || return 0
local immutable_count
immutable_count="$(jq -r '(.deployedBytecode.immutableReferences // {}) | length' "${artifact_json}")"
if [[ "${immutable_count}" != "0" && -n "${constructor_sig}" ]]; then
local bytecode tx_input encoded candidate candidate_keccak tx_keccak
bytecode="$(jq -r '.bytecode.object // empty' "${artifact_json}" | tr '[:upper:]' '[:lower:]')"
tx_input="$(route_broadcast_input "${label}" | tr '[:upper:]' '[:lower:]')" || tx_input=""
if [[ -n "${bytecode}" && -n "${tx_input}" ]]; then
encoded="$(cast abi-encode "${constructor_sig}" "${constructor_args[@]}")"
candidate="${bytecode}${encoded#0x}"
candidate_keccak="$(cast keccak "${candidate}")"
tx_keccak="$(cast keccak "${tx_input}")"
if [[ "${candidate}" == "${tx_input}" ]]; then
ok "${label}: recovered exact historical creation bytecode/input (${candidate_keccak})."
else
warn "${label}: creation bytecode/input does not match recorded deployment transaction."
warn "${label}: candidate_keccak=${candidate_keccak} tx_input_keccak=${tx_keccak}"
fi
fi
return 0
fi
artifact_runtime="$(jq -r '.deployedBytecode.object // empty' "${artifact_json}" | tr '[:upper:]' '[:lower:]')"
chain_runtime="$(cast code "${addr}" --rpc-url "${RPC_URL}" 2>/dev/null | tr -d '\n\r \t' | tr '[:upper:]' '[:lower:]')" || true
[[ -n "${artifact_runtime}" && -n "${chain_runtime}" && "${chain_runtime}" != "0x" ]] || return 0
if [[ "${artifact_runtime}" != "${chain_runtime}" ]]; then
warn "${label}: Foundry artifact runtime bytecode does not match deployed bytecode."
warn "${label}: artifact_keccak=$(cast keccak "${artifact_runtime}") chain_keccak=$(cast keccak "${chain_runtime}")"
fi
}
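# Hardhat lineage: rebuild a standard JSON input from the dbg/build-info pair
# by pruning the full compiler input down to the import closure of the target
# source, then submit it to Blockscout's standard-input endpoint.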
submit_standard_input_from_artifact() {
local label="$1"
local addr="$2"
local contract_path="$3"
local constructor_args="$4"
local dbg build input_file compiler_version evm_version optimization_runs optimization_enabled license_type response message
dbg="$(artifact_dbg_path "${label}")" || fail "${label}: missing dbg path mapping"
[[ -f "${dbg}" ]] || fail "${label}: missing dbg artifact ${dbg}"
build="$(jq -r '.buildInfo // .build_info // empty' "${dbg}")"
[[ -n "${build}" && "${build}" != "null" ]] || fail "${label}: missing build-info reference in ${dbg}"
build="$(cd "$(dirname "${dbg}")" && realpath "${build}")"
[[ -f "${build}" ]] || fail "${label}: missing build-info file ${build}"
input_file="$(mktemp)"
python3 - "${dbg}" "${build}" "${input_file}" "${contract_path%%:*}" <<'PY'
import json
import posixpath
import re
import sys
dbg_path, build_path, out_path, fallback_source = sys.argv[1:5]
with open(dbg_path, "r", encoding="utf-8") as fh:
dbg = json.load(fh)
with open(build_path, "r", encoding="utf-8") as fh:
build = json.load(fh)
source_name = dbg.get("sourceName") or dbg.get("source_name") or fallback_source
if not source_name:
raise SystemExit(f"missing sourceName in {dbg_path}")
input_data = build["input"]
sources = input_data.get("sources", {})
if source_name not in sources:
raise SystemExit(f"source {source_name} missing from build-info input")
import_re = re.compile(r'import\s+(?:[^;]*?\s+from\s+)?["\']([^"\']+)["\']\s*;')
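# Walk the import graph from the target source. Relative imports are resolved
# against the importing file's directory; remapped/absolute imports are looked
# up verbatim among the build-info sources.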
closure = set()
stack = [source_name]
while stack:
current = stack.pop()
if current in closure or current not in sources:
continue
closure.add(current)
content = sources[current].get("content", "")
for entry in import_re.findall(content):
if entry.startswith("."):
target = posixpath.normpath(posixpath.join(posixpath.dirname(current), entry))
else:
target = entry
if target in sources and target not in closure:
stack.append(target)
reduced = json.loads(json.dumps(input_data))
reduced["sources"] = {name: sources[name] for name in sorted(closure)}
with open(out_path, "w", encoding="utf-8") as fh:
json.dump(reduced, fh, separators=(",", ":"))
PY
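# Mirror the compiler settings recorded in build-info so the submission
# matches the original compilation exactly.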
compiler_version="$(jq -r '.solcLongVersion | "v" + .' "${build}")"
evm_version="$(jq -r '.input.settings.evmVersion // "default"' "${build}")"
optimization_runs="$(jq -r '.input.settings.optimizer.runs // 200' "${build}")"
optimization_enabled="$(jq -r '.input.settings.optimizer.enabled // true' "${build}")"
license_type="mit"
response="$(
curl --max-time 180 -fsS -X POST \
-F "compiler_version=${compiler_version}" \
-F "contract_name=${contract_path}" \
-F "autodetect_constructor_args=false" \
-F "constructor_args=${constructor_args}" \
-F "optimization_runs=${optimization_runs}" \
-F "is_optimization_enabled=${optimization_enabled}" \
-F "evm_version=${evm_version}" \
-F "license_type=${license_type}" \
-F "files[0]=@${input_file};type=application/json" \
"${BLOCKSCOUT_URL}/api/v2/smart-contracts/${addr}/verification/via/standard-input"
)" || {
rm -f "${input_file}"
fail "${label}: Blockscout standard-input submission failed."
}
rm -f "${input_file}"
message="$(jq -r '.message // empty' <<<"${response}")"
if [[ "${message}" == "Smart-contract verification started" || "${message}" == "Already verified" ]]; then
ok "${label} standard-input verification submission accepted."
return 0
fi
warn "${label} standard-input verification returned: ${response}"
return 1
}
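# Foundry lineage: render the standard JSON input with `forge verify-contract
# --show-standard-json-input`. Compiler/EVM/optimizer settings come from the
# Foundry artifact metadata when present, otherwise from the ROUTE_STACK_*
# defaults above.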
submit_standard_input_from_forge() {
local label="$1"
local addr="$2"
local contract_path="$3"
local constructor_sig="$4"
shift 4
local constructor_args_raw=("$@")
local constructor_args=""
local input_file response message
local compiler_version evm_version optimization_runs via_ir_flag artifact_json artifact_compiler
compiler_version="${ROUTE_STACK_SOLC_VERSION}"
evm_version="${ROUTE_STACK_EVM_VERSION}"
optimization_runs="${ROUTE_STACK_OPT_RUNS}"
via_ir_flag=(--via-ir)
artifact_json="$(foundry_artifact_json_path "${label}" || true)"
if [[ -n "${artifact_json}" && -f "${artifact_json}" ]]; then
compiler_version="v$(jq -r '.metadata.compiler.version // empty' "${artifact_json}")"
evm_version="$(jq -r '.metadata.settings.evmVersion // "default"' "${artifact_json}")"
optimization_runs="$(jq -r '.metadata.settings.optimizer.runs // 200' "${artifact_json}")"
if [[ "$(jq -r '.metadata.settings.viaIR // false' "${artifact_json}")" != "true" ]]; then
via_ir_flag=()
fi
fi
if [[ -n "${constructor_sig}" ]]; then
constructor_args="$(cast abi-encode "${constructor_sig}" "${constructor_args_raw[@]}")"
fi
runtime_hash_report "${label}" "${addr}" "${constructor_sig}" "${constructor_args_raw[@]}"
input_file="$(mktemp)"
(
cd "${SMOM_SOURCE_ROOT}"
forge verify-contract "${addr}" "${contract_path}" \
--chain-id 138 \
--root . \
--compiler-version "${compiler_version}" \
--num-of-optimizations "${optimization_runs}" \
"${via_ir_flag[@]}" \
--evm-version "${evm_version}" \
--show-standard-json-input >"${input_file}"
) || {
rm -f "${input_file}"
fail "${label}: failed to render Foundry standard-input from deployment sources."
}
response="$(
curl --max-time 180 -fsS -X POST \
-F "compiler_version=${compiler_version}" \
-F "contract_name=${contract_path}" \
-F "autodetect_constructor_args=false" \
-F "constructor_args=${constructor_args}" \
-F "optimization_runs=${optimization_runs}" \
-F "is_optimization_enabled=true" \
-F "evm_version=${evm_version}" \
-F "license_type=mit" \
-F "files[0]=@${input_file};type=application/json" \
"${BLOCKSCOUT_URL}/api/v2/smart-contracts/${addr}/verification/via/standard-input"
)" || {
rm -f "${input_file}"
fail "${label}: Blockscout Foundry standard-input submission failed."
}
rm -f "${input_file}"
message="$(jq -r '.message // empty' <<<"${response}")"
if [[ "${message}" == "Smart-contract verification started" || "${message}" == "Already verified" ]]; then
ok "${label} Foundry standard-input verification submission accepted."
return 0
fi
warn "${label} Foundry standard-input verification returned: ${response}"
return 1
}
submit_best_verification() {
local label="$1"
local addr="$2"
local path="$3"
local expected_name="$4"
local constructor_sig="$5"
shift 5
local constructor_args=("$@")
local encoded=""
if [[ -n "${constructor_sig}" ]]; then
encoded="$(cast abi-encode "${constructor_sig}" "${constructor_args[@]}")"
fi
# Prefer the Foundry deployment lineage for the route stack. The earlier
# Hardhat dbg/build-info path drifted away from the actual deployed compiler/EVM
# settings and is kept only as a compatibility fallback.
if submit_standard_input_from_forge "${label}" "${addr}" "${path}" "${constructor_sig}" "${constructor_args[@]}"; then
return 0
fi
if artifact_dbg_path "${label}" >/dev/null 2>&1; then
warn "${label}: falling back to artifact-derived standard-input after Foundry mismatch."
submit_standard_input_from_artifact "${label}" "${addr}" "${path}" "${encoded}" || return 1
return 0
fi
warn "${label}: falling back to legacy Forge flattened verification path."
submit_verification "${label}" "${addr}" "${path}" "${expected_name}" "${constructor_sig}" "${constructor_args[@]}"
}
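# Constructor token arguments and deployed addresses. Contract addresses are
# read from config/smart-contracts-master.json so this script tracks the
# canonical registry rather than hard-coding deployments.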
WETH="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
USDT="0x004b63A7B5b0E06f6bB6adb4a5F9f590BF3182D1"
USDC="0x71D6687F38b93CCad569Fa6352c876eea967201b"
DAI_PLACEHOLDER="0x6B175474E89094C44Da98b954EedeAC495271d0F"
ROUTER_V2="$(jq -r '.chains["138"].contracts.EnhancedSwapRouterV2' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
COORDINATOR_V2="$(jq -r '.chains["138"].contracts.IntentBridgeCoordinatorV2' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
DODO_ADAPTER="$(jq -r '.chains["138"].contracts.DodoRouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
DODO_V3_ADAPTER="$(jq -r '.chains["138"].contracts.DodoV3RouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
UNISWAP_V3_ADAPTER="$(jq -r '.chains["138"].contracts.UniswapV3RouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
BALANCER_ADAPTER="$(jq -r '.chains["138"].contracts.BalancerRouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
CURVE_ADAPTER="$(jq -r '.chains["138"].contracts.CurveRouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
ONEINCH_ADAPTER="$(jq -r '.chains["138"].contracts.OneInchRouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
PILOT_UNISWAP="$(jq -r '.chains["138"].contracts.PilotUniswapV3Router' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
PILOT_BALANCER="$(jq -r '.chains["138"].contracts.PilotBalancerVault' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
PILOT_CURVE="$(jq -r '.chains["138"].contracts.PilotCurve3Pool' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
PILOT_ONEINCH="$(jq -r '.chains["138"].contracts.PilotOneInchRouter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
log "Chain 138 route execution stack Blockscout verification"
log "RPC: ${RPC_URL}"
log "Explorer API: ${BLOCKSCOUT_API_BASE}"
if [[ -f "${ROUTE_BROADCAST}" ]]; then
log "Route broadcast commit: $(jq -r '.commit' "${ROUTE_BROADCAST}")"
log "Route broadcast timestamp: $(jq -r '.timestamp' "${ROUTE_BROADCAST}")"
fi
log
if (( STATUS_ONLY )); then
for pair in \
"EnhancedSwapRouterV2:${ROUTER_V2}:EnhancedSwapRouterV2" \
"IntentBridgeCoordinatorV2:${COORDINATOR_V2}:IntentBridgeCoordinatorV2" \
"DodoRouteExecutorAdapter:${DODO_ADAPTER}:DodoRouteExecutorAdapter" \
"DodoV3RouteExecutorAdapter:${DODO_V3_ADAPTER}:DodoV3RouteExecutorAdapter" \
"UniswapV3RouteExecutorAdapter:${UNISWAP_V3_ADAPTER}:UniswapV3RouteExecutorAdapter" \
"BalancerRouteExecutorAdapter:${BALANCER_ADAPTER}:BalancerRouteExecutorAdapter" \
"CurveRouteExecutorAdapter:${CURVE_ADAPTER}:CurveRouteExecutorAdapter" \
"OneInchRouteExecutorAdapter:${ONEINCH_ADAPTER}:OneInchRouteExecutorAdapter" \
"Chain138PilotUniswapV3Router:${PILOT_UNISWAP}:Chain138PilotUniswapV3Router" \
"Chain138PilotBalancerVault:${PILOT_BALANCER}:Chain138PilotBalancerVault" \
"Chain138PilotCurve3Pool:${PILOT_CURVE}:Chain138PilotCurve3Pool" \
"Chain138PilotOneInchAggregationRouter:${PILOT_ONEINCH}:Chain138PilotOneInchAggregationRouter"
do
IFS=":" read -r label addr expected <<<"${pair}"
should_handle "${label}" || continue
if is_verified "${addr}" "${expected}"; then
ok "${label} already verified on Blockscout."
else
warn "${label} not yet verified on Blockscout."
fi
done
exit 0
fi
should_handle "EnhancedSwapRouterV2" && submit_best_verification \
"EnhancedSwapRouterV2" \
"${ROUTER_V2}" \
"contracts/bridge/trustless/EnhancedSwapRouterV2.sol:EnhancedSwapRouterV2" \
"EnhancedSwapRouterV2" \
"constructor(address,address,address,address)" \
"${WETH}" "${USDT}" "${USDC}" "${DAI_PLACEHOLDER}"
should_handle "IntentBridgeCoordinatorV2" && submit_best_verification \
"IntentBridgeCoordinatorV2" \
"${COORDINATOR_V2}" \
"contracts/bridge/trustless/IntentBridgeCoordinatorV2.sol:IntentBridgeCoordinatorV2" \
"IntentBridgeCoordinatorV2" \
"constructor(address)" \
"${ROUTER_V2}"
should_handle "DodoRouteExecutorAdapter" && submit_best_verification \
"DodoRouteExecutorAdapter" \
"${DODO_ADAPTER}" \
"contracts/bridge/trustless/adapters/DodoRouteExecutorAdapter.sol:DodoRouteExecutorAdapter" \
"DodoRouteExecutorAdapter" \
""
should_handle "DodoV3RouteExecutorAdapter" && submit_best_verification \
"DodoV3RouteExecutorAdapter" \
"${DODO_V3_ADAPTER}" \
"contracts/bridge/trustless/adapters/DodoV3RouteExecutorAdapter.sol:DodoV3RouteExecutorAdapter" \
"DodoV3RouteExecutorAdapter" \
""
should_handle "UniswapV3RouteExecutorAdapter" && submit_best_verification \
"UniswapV3RouteExecutorAdapter" \
"${UNISWAP_V3_ADAPTER}" \
"contracts/bridge/trustless/adapters/UniswapV3RouteExecutorAdapter.sol:UniswapV3RouteExecutorAdapter" \
"UniswapV3RouteExecutorAdapter" \
""
should_handle "BalancerRouteExecutorAdapter" && submit_best_verification \
"BalancerRouteExecutorAdapter" \
"${BALANCER_ADAPTER}" \
"contracts/bridge/trustless/adapters/BalancerRouteExecutorAdapter.sol:BalancerRouteExecutorAdapter" \
"BalancerRouteExecutorAdapter" \
""
should_handle "CurveRouteExecutorAdapter" && submit_best_verification \
"CurveRouteExecutorAdapter" \
"${CURVE_ADAPTER}" \
"contracts/bridge/trustless/adapters/CurveRouteExecutorAdapter.sol:CurveRouteExecutorAdapter" \
"CurveRouteExecutorAdapter" \
""
should_handle "OneInchRouteExecutorAdapter" && submit_best_verification \
"OneInchRouteExecutorAdapter" \
"${ONEINCH_ADAPTER}" \
"contracts/bridge/trustless/adapters/OneInchRouteExecutorAdapter.sol:OneInchRouteExecutorAdapter" \
"OneInchRouteExecutorAdapter" \
""
should_handle "Chain138PilotUniswapV3Router" && submit_best_verification \
"Chain138PilotUniswapV3Router" \
"${PILOT_UNISWAP}" \
"contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol:Chain138PilotUniswapV3Router" \
"Chain138PilotUniswapV3Router" \
""
should_handle "Chain138PilotBalancerVault" && submit_best_verification \
"Chain138PilotBalancerVault" \
"${PILOT_BALANCER}" \
"contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol:Chain138PilotBalancerVault" \
"Chain138PilotBalancerVault" \
""
should_handle "Chain138PilotCurve3Pool" && submit_best_verification \
"Chain138PilotCurve3Pool" \
"${PILOT_CURVE}" \
"contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol:Chain138PilotCurve3Pool" \
"Chain138PilotCurve3Pool" \
"constructor(address,address,address,uint256)" \
"${USDT}" "${USDC}" "0x0000000000000000000000000000000000000000" "4"
should_handle "Chain138PilotOneInchAggregationRouter" && submit_best_verification \
"Chain138PilotOneInchAggregationRouter" \
"${PILOT_ONEINCH}" \
"contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol:Chain138PilotOneInchAggregationRouter" \
"Chain138PilotOneInchAggregationRouter" \
""
if (( NO_WAIT )); then
log
ok "Chain 138 route execution stack verification submissions complete."
exit 0
fi
log
ok "Chain 138 route execution stack verification flow complete."