Compare commits
15 Commits — docs/explo… → main

| Author | SHA1 | Date |
|---|---|---|
|  | 48d3e3f761 |  |
|  | bed94a3ad4 |  |
|  | 7be2190441 |  |
|  | 19cb7fe8b5 |  |
|  | f1715fb684 |  |
|  | 6a9f5dead0 |  |
|  | 770a1db99a |  |
|  | 8868a3501f |  |
|  | cf96e9d821 |  |
|  | 4c4aa28c95 |  |
|  | 9306a65186 |  |
|  | 7ab231c4ce |  |
|  | b58c3a0342 |  |
|  | b8e735dcac |  |
|  | 3bea587e12 |  |
@@ -6,6 +6,10 @@
 2. Make changes, ensure tests pass
 3. Open a pull request
 
+Deploy workflow policy:
+`main` and `master` are both deploy-triggering branches, so `.gitea/workflow-sources/deploy-to-phoenix.yml` and `.gitea/workflow-sources/validate-on-pr.yml` must stay identical across both branches.
+Use `bash scripts/verify/sync-gitea-workflows.sh` after editing workflow-source files, and `bash scripts/verify/run-all-validation.sh --skip-genesis` to catch workflow drift before push.
+
 ## Pull Requests
 
 - Use the PR template when opening a PR
.gitea/workflow-sources/deploy-to-phoenix.yml — new file (125 lines)
@@ -0,0 +1,125 @@
# Canonical deploy workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
name: Deploy to Phoenix

on:
  push:
    branches: [main, master]
  workflow_dispatch:

jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Fetch deploy branches for workflow parity check
        run: |
          REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
          if git remote | grep -qx gitea; then
            REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
          fi
          git fetch --depth=1 "$REMOTE" main master

      - name: Install validation dependencies
        run: |
          corepack enable
          pnpm install --frozen-lockfile

      # The cW* mesh matrix and deployment-status validators read
      # cross-chain-pmm-lps/config/*.json. The parent checkout does not
      # materialize submodules by default, and .gitmodules mixes public HTTPS
      # with SSH URLs, so clone only the required public validation dependency.
      - name: Materialize cross-chain-pmm-lps
        run: |
          set -euo pipefail
          if [ ! -f cross-chain-pmm-lps/config/deployment-status.json ]; then
            rm -rf cross-chain-pmm-lps
            git clone --depth=1 \
              https://gitea.d-bis.org/d-bis/cross-chain-pmm-lps.git \
              cross-chain-pmm-lps
          fi

      - name: Run repo validation gate
        run: |
          bash scripts/verify/run-all-validation.sh --skip-genesis

  deploy:
    needs: validate
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Trigger Phoenix deployment
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          set +e
          curl -sSf --retry 3 --retry-connrefused --retry-delay 10 --retry-max-time 180 \
            --connect-timeout 10 --max-time 120 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"default\"}"
          rc="$?"
          set -e
          if [ "$rc" -eq 52 ]; then
            HEALTH_URL="${{ secrets.PHOENIX_DEPLOY_URL }}"
            HEALTH_URL="${HEALTH_URL%/api/deploy}/health"
            echo "Phoenix deploy API restarted during self-deploy; verifying ${HEALTH_URL}"
            for i in $(seq 1 12); do
              if curl -fsS --max-time 5 "$HEALTH_URL"; then
                exit 0
              fi
              sleep 5
            done
          fi
          exit "$rc"

  deploy-atomic-swap-dapp:
    needs: deploy
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Trigger Atomic Swap dApp deployment (Phoenix)
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          curl -sSf \
            --connect-timeout 10 --max-time 900 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"atomic-swap-dapp-live\"}"

  # After app deploy, ask Phoenix to run path-gated Cloudflare DNS sync on the host that has
  # PHOENIX_REPO_ROOT + .env (not on this runner). Skips unless PHOENIX_CLOUDFLARE_SYNC=1 on that host.
  # continue-on-error: first-time or missing opt-in should not block the main deploy.
  cloudflare:
    needs:
      - deploy
      - deploy-atomic-swap-dapp
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Request Cloudflare DNS sync (Phoenix)
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          curl -sSf --retry 5 --retry-all-errors --retry-connrefused --retry-delay 10 --retry-max-time 300 \
            --connect-timeout 10 --max-time 120 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"cloudflare-sync\"}" \
            || { echo "Cloudflare DNS sync request failed; optional sync is non-blocking."; exit 0; }
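The deploy job's exit-52 branch is worth exercising by hand before trusting it in CI: curl returns 52 ("empty reply from server") exactly when the API accepted the connection but restarted before answering, which is what a self-deploy of `phoenix-deploy-api` looks like. A minimal manual sketch, assuming `PHOENIX_DEPLOY_URL` and `PHOENIX_DEPLOY_TOKEN` are exported in the shell (e.g. the `http://192.168.11.59:4001/api/deploy` endpoint documented below) and that `/health` is the sibling path the workflow derives:

```bash
#!/usr/bin/env bash
# Hypothetical manual probe; mirrors the workflow's exit-52 health fallback.
set -euo pipefail
: "${PHOENIX_DEPLOY_URL:?export PHOENIX_DEPLOY_URL first}"
: "${PHOENIX_DEPLOY_TOKEN:?export PHOENIX_DEPLOY_TOKEN first}"
SHA="$(git rev-parse HEAD)"

set +e
curl -sSf -X POST "$PHOENIX_DEPLOY_URL" \
  -H "Authorization: Bearer $PHOENIX_DEPLOY_TOKEN" \
  -H "Content-Type: application/json" \
  -d "{\"repo\":\"d-bis/proxmox\",\"sha\":\"${SHA}\",\"branch\":\"main\",\"target\":\"default\"}"
rc=$?
set -e

if [ "$rc" -eq 52 ]; then
  # Empty reply: the API likely restarted itself mid-deploy; poll its health endpoint.
  curl -fsS --retry 12 --retry-delay 5 --retry-all-errors "${PHOENIX_DEPLOY_URL%/api/deploy}/health"
else
  exit "$rc"
fi
```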
.gitea/workflow-sources/validate-on-pr.yml — new file (33 lines)
@@ -0,0 +1,33 @@
# Canonical PR validation workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
# PR-only: push validation already runs in deploy-to-phoenix.yml; this gives PRs the same
# no-LAN checks without the deploy job (and without deploy secrets).
name: Validate (PR)
on:
  pull_request:
    types: [opened, synchronize, reopened]
    branches: [main, master]
  workflow_dispatch:
jobs:
  run-all-validation:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Fetch deploy branches for workflow parity check
        run: |
          REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
          if git remote | grep -qx gitea; then
            REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
          fi
          git fetch --depth=1 "$REMOTE" main master
      - name: Install validation dependencies
        run: |
          corepack enable
          pnpm install --frozen-lockfile
      # Optional: set org/repo variable URA_STRICT_CLOSURE=1 to fail PRs while pilot placeholders
      # remain in manifest (see scripts/ura/validate-manifest-closure.mjs). Not enabled by default.
      - name: run-all-validation (no LAN, no genesis)
        env:
          URA_STRICT_CLOSURE: ${{ vars.URA_STRICT_CLOSURE }}
        run: bash scripts/verify/run-all-validation.sh --skip-genesis
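The `URA_STRICT_CLOSURE` plumbing above only forwards an org/repo variable into the validator's environment; the enforcement itself lives in `scripts/ura/validate-manifest-closure.mjs`. As a rough sketch of the opt-in pattern (the variable name is real, the surrounding logic here is illustrative only):

```bash
# Illustrative only — the real check is scripts/ura/validate-manifest-closure.mjs.
if [ "${URA_STRICT_CLOSURE:-0}" = "1" ]; then
  echo "strict closure: pilot placeholders in the manifest fail the run"
  # ...treat placeholder manifest entries as hard errors...
else
  echo "strict closure disabled: placeholders are reported but do not fail CI"
fi
```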
.gitea/workflows/deploy-to-phoenix.yml
@@ -1,11 +1,52 @@
# Canonical deploy workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
name: Deploy to Phoenix

on:
  push:
    branches: [main, master]
  workflow_dispatch:

jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Fetch deploy branches for workflow parity check
        run: |
          REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
          if git remote | grep -qx gitea; then
            REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
          fi
          git fetch --depth=1 "$REMOTE" main master

      - name: Install validation dependencies
        run: |
          corepack enable
          pnpm install --frozen-lockfile

      # The cW* mesh matrix and deployment-status validators read
      # cross-chain-pmm-lps/config/*.json. The parent checkout does not
      # materialize submodules by default, and .gitmodules mixes public HTTPS
      # with SSH URLs, so clone only the required public validation dependency.
      - name: Materialize cross-chain-pmm-lps
        run: |
          set -euo pipefail
          if [ ! -f cross-chain-pmm-lps/config/deployment-status.json ]; then
            rm -rf cross-chain-pmm-lps
            git clone --depth=1 \
              https://gitea.d-bis.org/d-bis/cross-chain-pmm-lps.git \
              cross-chain-pmm-lps
          fi

      - name: Run repo validation gate
        run: |
          bash scripts/verify/run-all-validation.sh --skip-genesis

  deploy:
    needs: validate
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
@@ -13,8 +54,72 @@ jobs:
 
       - name: Trigger Phoenix deployment
         run: |
-          curl -sSf -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
+          set -euo pipefail
+          SHA="$(git rev-parse HEAD)"
+          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
+          set +e
+          curl -sSf --retry 3 --retry-connrefused --retry-delay 10 --retry-max-time 180 \
+            --connect-timeout 10 --max-time 120 \
+            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
             -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
             -H "Content-Type: application/json" \
-            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${{ gitea.sha }}\",\"branch\":\"${{ gitea.ref_name }}\"}"
-        continue-on-error: true
+            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"default\"}"
+          rc="$?"
+          set -e
+          if [ "$rc" -eq 52 ]; then
+            HEALTH_URL="${{ secrets.PHOENIX_DEPLOY_URL }}"
+            HEALTH_URL="${HEALTH_URL%/api/deploy}/health"
+            echo "Phoenix deploy API restarted during self-deploy; verifying ${HEALTH_URL}"
+            for i in $(seq 1 12); do
+              if curl -fsS --max-time 5 "$HEALTH_URL"; then
+                exit 0
+              fi
+              sleep 5
+            done
+          fi
+          exit "$rc"
+
+  deploy-atomic-swap-dapp:
+    needs: deploy
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Trigger Atomic Swap dApp deployment (Phoenix)
+        run: |
+          set -euo pipefail
+          SHA="$(git rev-parse HEAD)"
+          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
+          curl -sSf \
+            --connect-timeout 10 --max-time 900 \
+            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
+            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
+            -H "Content-Type: application/json" \
+            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"atomic-swap-dapp-live\"}"
+
+  # After app deploy, ask Phoenix to run path-gated Cloudflare DNS sync on the host that has
+  # PHOENIX_REPO_ROOT + .env (not on this runner). Skips unless PHOENIX_CLOUDFLARE_SYNC=1 on that host.
+  # continue-on-error: first-time or missing opt-in should not block the main deploy.
+  cloudflare:
+    needs:
+      - deploy
+      - deploy-atomic-swap-dapp
+    runs-on: ubuntu-latest
+    continue-on-error: true
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Request Cloudflare DNS sync (Phoenix)
+        run: |
+          set -euo pipefail
+          SHA="$(git rev-parse HEAD)"
+          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
+          curl -sSf --retry 5 --retry-all-errors --retry-connrefused --retry-delay 10 --retry-max-time 300 \
+            --connect-timeout 10 --max-time 120 \
+            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
+            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
+            -H "Content-Type: application/json" \
+            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"cloudflare-sync\"}" \
+            || { echo "Cloudflare DNS sync request failed; optional sync is non-blocking."; exit 0; }
.gitea/workflows/validate-on-pr.yml — new file (33 lines)
@@ -0,0 +1,33 @@
# Canonical PR validation workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
# PR-only: push validation already runs in deploy-to-phoenix.yml; this gives PRs the same
# no-LAN checks without the deploy job (and without deploy secrets).
name: Validate (PR)
on:
  pull_request:
    types: [opened, synchronize, reopened]
    branches: [main, master]
  workflow_dispatch:
jobs:
  run-all-validation:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Fetch deploy branches for workflow parity check
        run: |
          REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
          if git remote | grep -qx gitea; then
            REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
          fi
          git fetch --depth=1 "$REMOTE" main master
      - name: Install validation dependencies
        run: |
          corepack enable
          pnpm install --frozen-lockfile
      # Optional: set org/repo variable URA_STRICT_CLOSURE=1 to fail PRs while pilot placeholders
      # remain in manifest (see scripts/ura/validate-manifest-closure.mjs). Not enabled by default.
      - name: run-all-validation (no LAN, no genesis)
        env:
          URA_STRICT_CLOSURE: ${{ vars.URA_STRICT_CLOSURE }}
        run: bash scripts/verify/run-all-validation.sh --skip-genesis
@@ -2076,10 +2076,10 @@
       "baseSymbol": "cWETH",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd012000000000000000000000000000000000001",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "eth_mainnet",
       "venue": "dodo_pmm",
@@ -2091,10 +2091,10 @@
       "baseSymbol": "cWETH",
       "quoteSymbol": "WETH",
       "poolAddress": "0xd011000000000000000000000000000000000001",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "eth_mainnet",
       "venue": "dodo_pmm",
@@ -2150,10 +2150,10 @@
       "baseSymbol": "cWETHL2",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd02200000000000000000000000000000000000a",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "eth_l2",
       "venue": "dodo_pmm",
@@ -2165,10 +2165,10 @@
       "baseSymbol": "cWETHL2",
       "quoteSymbol": "WETH",
       "poolAddress": "0xd02100000000000000000000000000000000000a",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "eth_l2",
       "venue": "dodo_pmm",
@@ -2246,10 +2246,10 @@
       "baseSymbol": "cWXDAI",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd072000000000000000000000000000000000064",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "xdai",
       "venue": "dodo_pmm",
@@ -2261,10 +2261,10 @@
       "baseSymbol": "cWXDAI",
       "quoteSymbol": "WXDAI",
       "poolAddress": "0xd071000000000000000000000000000000000064",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "xdai",
       "venue": "dodo_pmm",
@@ -2276,10 +2276,10 @@
       "baseSymbol": "cWWEMIX",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd092000000000000000000000000000000000457",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "wemix",
       "venue": "dodo_pmm",
@@ -2291,10 +2291,10 @@
       "baseSymbol": "cWWEMIX",
       "quoteSymbol": "WWEMIX",
       "poolAddress": "0xd091000000000000000000000000000000000457",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "wemix",
       "venue": "dodo_pmm",
@@ -2339,10 +2339,10 @@
       "baseSymbol": "cWPOL",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd042000000000000000000000000000000000089",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "pol",
       "venue": "dodo_pmm",
@@ -2354,10 +2354,10 @@
       "baseSymbol": "cWPOL",
       "quoteSymbol": "WPOL",
       "poolAddress": "0xd041000000000000000000000000000000000089",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "pol",
       "venue": "dodo_pmm",
@@ -2413,10 +2413,10 @@
       "baseSymbol": "cWCRO",
       "quoteSymbol": "USDT",
       "poolAddress": "0xd062000000000000000000000000000000000019",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "cro",
       "venue": "dodo_pmm",
@@ -2428,10 +2428,10 @@
       "baseSymbol": "cWCRO",
       "quoteSymbol": "WCRO",
       "poolAddress": "0xd061000000000000000000000000000000000019",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "cro",
       "venue": "dodo_pmm",
@@ -2487,10 +2487,10 @@
       "baseSymbol": "cWETHL2",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd02200000000000000000000000000000000a4b1",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "eth_l2",
       "venue": "dodo_pmm",
@@ -2502,10 +2502,10 @@
       "baseSymbol": "cWETHL2",
       "quoteSymbol": "WETH",
       "poolAddress": "0xd02100000000000000000000000000000000a4b1",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "eth_l2",
       "venue": "dodo_pmm",
@@ -2572,10 +2572,10 @@
       "baseSymbol": "cWCELO",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd08200000000000000000000000000000000a4ec",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "celo",
       "venue": "dodo_pmm",
@@ -2587,10 +2587,10 @@
       "baseSymbol": "cWCELO",
       "quoteSymbol": "WCELO",
       "poolAddress": "0xd08100000000000000000000000000000000a4ec",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "celo",
       "venue": "dodo_pmm",
@@ -2635,10 +2635,10 @@
       "baseSymbol": "cWAVAX",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd05200000000000000000000000000000000a86a",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "avax",
       "venue": "dodo_pmm",
@@ -2650,10 +2650,10 @@
       "baseSymbol": "cWAVAX",
       "quoteSymbol": "WAVAX",
       "poolAddress": "0xd05100000000000000000000000000000000a86a",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "avax",
       "venue": "dodo_pmm",
@@ -2720,10 +2720,10 @@
       "baseSymbol": "cWBNB",
       "quoteSymbol": "USDT",
       "poolAddress": "0xd032000000000000000000000000000000000038",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "bnb",
       "venue": "dodo_pmm",
@@ -2735,10 +2735,10 @@
       "baseSymbol": "cWBNB",
       "quoteSymbol": "WBNB",
       "poolAddress": "0xd031000000000000000000000000000000000038",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "bnb",
       "venue": "dodo_pmm",
@@ -2816,10 +2816,10 @@
       "baseSymbol": "cWETHL2",
       "quoteSymbol": "USDC",
       "poolAddress": "0xd022000000000000000000000000000000002105",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "eth_l2",
       "venue": "dodo_pmm",
@@ -2831,10 +2831,10 @@
       "baseSymbol": "cWETHL2",
       "quoteSymbol": "WETH",
       "poolAddress": "0xd021000000000000000000000000000000002105",
-      "active": true,
-      "routingEnabled": true,
-      "mcpVisible": true,
-      "phase": "wave1",
+      "active": false,
+      "routingEnabled": false,
+      "mcpVisible": false,
+      "phase": "wave1-staged",
       "assetClass": "gas_native",
       "familyKey": "eth_l2",
       "venue": "dodo_pmm",
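Taken together, these hunks stage every wave-1 gas-native pool: `active`, `routingEnabled`, and `mcpVisible` flip to `false` and `phase` moves from `wave1` to `wave1-staged`. A quick way to audit the staged set after the change, sketched with jq (the top-level array key in the `cross-chain-pmm-lps/config/*.json` file is assumed here to be `pools`, and `<config>` stands in for the actual filename):

```bash
# List staged gas-native pairs with their pool addresses (key names partly assumed).
jq -r '.pools[]
       | select(.phase == "wave1-staged")
       | [.baseSymbol, .quoteSymbol, .poolAddress] | @tsv' \
  cross-chain-pmm-lps/config/<config>.json
```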
@@ -1936,7 +1936,7 @@
       "key": "Compliant_WEMIX_cW",
       "name": "cWEMIX->cWWEMIX",
       "addressFrom": "0x4d82206bec5b4dfa17759ffede07e35f4f63a050",
-      "addressTo": "0xc111000000000000000000000000000000000457",
+      "addressTo": "0x4c38f9a5ed68a04cd28a72e8c68c459ec34576f3",
       "notes": "Wave 1 gas-family lane wemix: Chain 138 cWEMIX -> Wemix cWWEMIX. hybrid_cap backing with uniswap_v3 reference pricing and DODO PMM edge liquidity."
     }
   ]
@@ -59,32 +59,6 @@ pct exec 5000 -- bash -c 'cd /opt/blockscout && docker-compose up -d blockscout'
 
 ---
 
-## Fix: Smart-Contract Verifier Sidecar Missing
-
-**Symptom:** verification endpoints exist and submissions appear accepted, but deployed contracts remain `bytecode-only` and never promote into full source metadata.
-
-**Root cause:** CT `5000` is running only `blockscout` and `postgres`, without the upstream `smart-contract-verifier` sidecar and without the required verifier wiring env:
-
-- `MICROSERVICE_SC_VERIFIER_ENABLED=true`
-- `MICROSERVICE_SC_VERIFIER_TYPE=sc_verifier`
-- `MICROSERVICE_SC_VERIFIER_URL=http://smart-contract-verifier:8050/`
-
-**Recommended fix:**
-
-```bash
-bash scripts/deployment/ensure-blockscout-smart-contract-verifier-5000.sh --dry-run
-bash scripts/deployment/ensure-blockscout-smart-contract-verifier-5000.sh --apply
-```
-
-The script:
-1. Backs up `/opt/blockscout/docker-compose.yml`
-2. Adds the upstream `smart-contract-verifier` sidecar
-3. Wires Blockscout to the sidecar with `MICROSERVICE_SC_VERIFIER_*`
-4. Restarts the stack cleanly
-5. Verifies `/api/v2/smart-contracts/verification/config`
-
----
-
 ## Fix: Migrate VM 5000 to thin5 (has free space)
 
 **Run on Proxmox host r630-02 (192.168.11.12):**
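The `/api/v2/smart-contracts/verification/config` endpoint referenced above can also be probed by hand. A one-line sketch, assuming the CT 5000 Blockscout instance is the `http://192.168.11.140:4000` host used elsewhere in this repo:

```bash
# A non-empty verifier config here implies the sidecar wiring took effect.
curl -s http://192.168.11.140:4000/api/v2/smart-contracts/verification/config | jq .
```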
@@ -135,8 +109,6 @@ source smom-dbis-138/.env 2>/dev/null
./scripts/verify/run-contract-verification-with-proxy.sh
```

**Important:** native `Uniswap v2` / `SushiSwap` verification should pin the exact historical compiler version. The repo submitter now does that explicitly so Forge defaults do not silently downgrade the verification attempt.

---

## Forge Verification Compatibility
docs/04-configuration/DEVIN_GITEA_PROXMOX_CICD.md — new file (217 lines)
@@ -0,0 +1,217 @@
# Devin → Gitea → Proxmox CI/CD

**Status:** Working baseline for this repo
**Last Updated:** 2026-04-20

## Goal

Create a repeatable path where:

1. Devin lands code in Gitea.
2. Gitea Actions validates the repo on the site-wide `act_runner`.
3. A successful workflow calls `phoenix-deploy-api`.
4. `phoenix-deploy-api` resolves the repo/branch to a deploy target and runs the matching Proxmox publish command.
5. The deploy service checks the target health URL before it reports success.

## Current baseline in this repo

The path now exists for **`d-bis/proxmox`** on **`main`** and **`master`**:

- Canonical workflow sources: [.gitea/workflow-sources/deploy-to-phoenix.yml](/home/intlc/projects/proxmox/.gitea/workflow-sources/deploy-to-phoenix.yml) and [.gitea/workflow-sources/validate-on-pr.yml](/home/intlc/projects/proxmox/.gitea/workflow-sources/validate-on-pr.yml)
- Workflow: [deploy-to-phoenix.yml](/home/intlc/projects/proxmox/.gitea/workflows/deploy-to-phoenix.yml)
- Manual app workflow: [deploy-portal-live.yml](/home/intlc/projects/proxmox/.gitea/workflows/deploy-portal-live.yml)
- Deploy service: [server.js](/home/intlc/projects/proxmox/phoenix-deploy-api/server.js)
- Target map: [deploy-targets.json](/home/intlc/projects/proxmox/phoenix-deploy-api/deploy-targets.json)
- Current live publish script: [deploy-phoenix-deploy-api-to-dev-vm.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh)
- Manual smoke trigger: [trigger-phoenix-deploy.sh](/home/intlc/projects/proxmox/scripts/dev-vm/trigger-phoenix-deploy.sh)
- Target validator: [validate-phoenix-deploy-targets.sh](/home/intlc/projects/proxmox/scripts/validation/validate-phoenix-deploy-targets.sh)
- Bootstrap helper: [bootstrap-phoenix-cicd.sh](/home/intlc/projects/proxmox/scripts/dev-vm/bootstrap-phoenix-cicd.sh)

That default target publishes the `phoenix-deploy-api` bundle to **VMID 5700** on the correct Proxmox node and starts the CT if needed.

A second target is now available:

- `portal-live` → runs [sync-sankofa-portal-7801.sh](/home/intlc/projects/proxmox/scripts/deployment/sync-sankofa-portal-7801.sh) and then checks `http://192.168.11.51:3000/`

## Workflow lockstep

Because both `main` and `master` can trigger deploys, deploy behavior is now defined from canonical source files and checked for branch parity.

- Edit only the source files under [.gitea/workflow-sources](/home/intlc/projects/proxmox/.gitea/workflow-sources:1)
- Sync the checked-in workflow copies with:

```bash
bash scripts/verify/sync-gitea-workflows.sh
```

- Validate source sync plus `main`/`master` parity with:

```bash
bash scripts/verify/run-all-validation.sh --skip-genesis
```

The deploy and PR workflows both fetch `origin/main` and `origin/master` before validation, so branch drift now fails CI instead of silently changing deploy behavior.
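The essence of that parity gate can be sketched in a few lines (the canonical implementation is `scripts/verify/sync-gitea-workflows.sh` plus the checks inside `run-all-validation.sh`; this is illustrative only):

```bash
# Sketch: fail when the two deploy branches disagree on a workflow-source file.
REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
for f in .gitea/workflow-sources/deploy-to-phoenix.yml .gitea/workflow-sources/validate-on-pr.yml; do
  if ! diff <(git show "$REMOTE/main:$f") <(git show "$REMOTE/master:$f") >/dev/null; then
    echo "workflow drift: $f differs between main and master" >&2
    exit 1
  fi
done
```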
## Flow

```text
Devin
  -> push to Gitea
  -> Gitea Actions on act_runner (5700)
     -> bash scripts/verify/run-all-validation.sh --skip-genesis
     -> validates deploy-targets.json structure
  -> POST /api/deploy to phoenix-deploy-api
     -> match repo + branch + target in deploy-targets.json
     -> run deploy command
     -> verify target health URL
     -> update Gitea commit status success/failure
```
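The "match repo + branch + target" step amounts to a lookup in the target map. The same resolution can be sketched with jq (field names are taken from `deploy-targets.json`; the service itself does this in `server.js`):

```bash
REPO=d-bis/proxmox BRANCH=main TARGET=default
jq -e --arg repo "$REPO" --arg branch "$BRANCH" --arg target "$TARGET" \
  '.targets[] | select(.repo == $repo and .branch == $branch and .target == $target)' \
  phoenix-deploy-api/deploy-targets.json
# jq -e exits non-zero when nothing matches — the case the API maps to HTTP 404.
```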
## Required setup

### 1. Runner

Bring up the site-wide Gitea runner on VMID **5700**:

```bash
bash scripts/dev-vm/bootstrap-gitea-act-runner-site-wide.sh
```

Reference: [GITEA_ACT_RUNNER_SETUP.md](GITEA_ACT_RUNNER_SETUP.md)

### 0. One-command bootstrap

If root `.env` already contains the needed values, use:

```bash
bash scripts/dev-vm/bootstrap-phoenix-cicd.sh --repo d-bis/proxmox
```

This runs the validation gate, deploys `phoenix-deploy-api`, and smoke-checks the service.

### 2. Deploy API service

Deploy the API to the dev VM:

```bash
./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --dry-run
./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --apply --start-ct
```

On the target VM, set at least:

```bash
PORT=4001
GITEA_URL=https://gitea.d-bis.org
GITEA_TOKEN=<token with repo status access>
PHOENIX_DEPLOY_SECRET=<shared secret>
PHOENIX_REPO_ROOT=/home/intlc/projects/proxmox
```

Optional:

```bash
DEPLOY_TARGETS_PATH=/opt/phoenix-deploy-api/deploy-targets.json
```

For the `portal-live` target, also set:

```bash
SANKOFA_PORTAL_SRC=/home/intlc/projects/Sankofa/portal
```
### 3. Gitea repo secrets

Set these in the Gitea repository that should deploy:

- `PHOENIX_DEPLOY_URL`
- `PHOENIX_DEPLOY_TOKEN`

Example:

- `PHOENIX_DEPLOY_URL=http://192.168.11.59:4001/api/deploy`
- `PHOENIX_DEPLOY_TOKEN=<same value as PHOENIX_DEPLOY_SECRET>`

For webhook signing, the bootstrap/helper path also expects:

- `PHOENIX_DEPLOY_SECRET`
- `PHOENIX_WEBHOOK_DEPLOY_ENABLED=1` only if you want webhook events themselves to execute deploys

Do not enable both repo Actions deploys and webhook deploys for the same repo unless you intentionally want duplicate deploy attempts.
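Gitea signs webhook payloads with an HMAC-SHA256 over the raw body using the configured secret, delivered in the `X-Gitea-Signature` header. A sketch of computing the expected signature for a saved payload, assuming `PHOENIX_DEPLOY_SECRET` matches the webhook secret (how `server.js` verifies it is not shown in this excerpt):

```bash
# Hex HMAC-SHA256 over the raw body; compare against the X-Gitea-Signature header value.
openssl dgst -sha256 -hmac "$PHOENIX_DEPLOY_SECRET" -r < payload.json | cut -d' ' -f1
```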
## Adding more repos or VM targets

Extend [deploy-targets.json](/home/intlc/projects/proxmox/phoenix-deploy-api/deploy-targets.json) with another entry.

Each target is keyed by:

- `repo`
- `branch`
- `target`

Each target defines:

- `cwd`
- `command`
- `required_env`
- optional `healthcheck`
- optional `timeout_sec`

Example shape:

```json
{
  "repo": "d-bis/another-service",
  "branch": "main",
  "target": "portal-live",
  "cwd": "${PHOENIX_REPO_ROOT}",
  "command": ["bash", "scripts/deployment/sync-sankofa-portal-7801.sh"],
  "required_env": ["PHOENIX_REPO_ROOT"]
}
```

Use separate `target` names when the same repo can publish to different VMIDs or environments.

Target-map validation is already part of:

```bash
bash scripts/verify/run-all-validation.sh --skip-genesis
```

and can also be run directly:

```bash
bash scripts/validation/validate-phoenix-deploy-targets.sh
```
## Manual testing

Before trusting a new Gitea workflow, trigger the deploy service directly:

```bash
bash scripts/dev-vm/trigger-phoenix-deploy.sh
```

Trigger the live portal deployment target directly:

```bash
bash scripts/dev-vm/trigger-phoenix-deploy.sh d-bis/proxmox main portal-live
```

Inspect configured targets:

```bash
curl -s http://192.168.11.59:4001/api/deploy-targets | jq .
```
## Recommended next expansions

- Add a Phoenix API target for the repo that owns VMID **7800** or **8600**, depending on which deployment line is canonical.
- Add repo-specific workflows once the Sankofa source repos themselves are mirrored into Gitea Actions.
- Move secret values from ad hoc `.env` files into the final operator-managed secret source once you settle the production host for `phoenix-deploy-api`.

## Notes

- The Gitea workflow is gated by `scripts/verify/run-all-validation.sh --skip-genesis` before deploy.
- `phoenix-deploy-api` now returns `404` when no matching target exists and `500` when the deploy command fails.
- Commit status updates are written back to Gitea from the deploy service itself.
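Those status codes make negative testing cheap: a deliberately unknown target should come back as HTTP 404. A sketch, assuming the same `PHOENIX_DEPLOY_URL` / `PHOENIX_DEPLOY_TOKEN` values as the workflows:

```bash
curl -s -o /dev/null -w '%{http_code}\n' -X POST "$PHOENIX_DEPLOY_URL" \
  -H "Authorization: Bearer $PHOENIX_DEPLOY_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"repo":"d-bis/proxmox","sha":"0000000","branch":"main","target":"no-such-target"}'
# Expected: 404 (no matching entry in deploy-targets.json)
```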
@@ -1,6 +1,6 @@
 # Contract Verification And Publication Matrix (All Networks)
 
-**Generated:** 2026-04-16T19:20:09.634Z
+**Generated:** 2026-04-11T21:10:30.092Z
 **Authoritative sources:** `config/smart-contracts-master.json`, `cross-chain-pmm-lps/config/deployment-status.json`
 
 This matrix is the canonical repo-level inventory for **what still needs explorer verification and publication coverage across every network currently tracked in the workspace**.
@@ -15,18 +15,18 @@ This matrix is the canonical repo-level inventory for **what still needs explore
 
 | Chain ID | Chain | Total Entries | Canonical Contracts | cW / Gas Mirrors | PMM Pools | Explorer |
 | --- | --- | ---: | ---: | ---: | ---: | --- |
-| 1 | Ethereum Mainnet | 40 | 3 | 14 | 17 | https://etherscan.io |
-| 10 | Optimism | 31 | 0 | 14 | 12 | https://optimistic.etherscan.io |
-| 25 | Cronos | 29 | 0 | 14 | 12 | https://cronoscan.com |
-| 56 | BSC | 28 | 0 | 16 | 10 | https://bscscan.com |
-| 100 | Gnosis | 28 | 0 | 14 | 10 | https://gnosisscan.io |
-| 137 | Polygon | 32 | 0 | 15 | 12 | https://polygonscan.com |
+| 1 | Ethereum Mainnet | 36 | 3 | 14 | 13 | https://etherscan.io |
+| 10 | Optimism | 21 | 0 | 14 | 2 | https://optimistic.etherscan.io |
+| 25 | Cronos | 19 | 0 | 14 | 2 | https://cronoscan.com |
+| 56 | BSC | 18 | 0 | 16 | 0 | https://bscscan.com |
+| 100 | Gnosis | 18 | 0 | 14 | 0 | https://gnosisscan.io |
+| 137 | Polygon | 22 | 0 | 15 | 2 | https://polygonscan.com |
 | 138 | Chain 138 | 115 | 115 | 0 | 0 | https://blockscout.defi-oracle.io |
 | 1111 | Wemix | 4 | 0 | 2 | 0 | https://explorer.wemix.com |
-| 8453 | Base | 29 | 0 | 14 | 10 | https://basescan.org |
-| 42161 | Arbitrum | 29 | 0 | 14 | 10 | https://arbiscan.io |
-| 42220 | Celo | 29 | 0 | 16 | 10 | https://celoscan.io |
-| 43114 | Avalanche | 29 | 0 | 16 | 10 | https://snowtrace.io |
+| 8453 | Base | 19 | 0 | 14 | 0 | https://basescan.org |
+| 42161 | Arbitrum | 19 | 0 | 14 | 0 | https://arbiscan.io |
+| 42220 | Celo | 19 | 0 | 16 | 0 | https://celoscan.io |
+| 43114 | Avalanche | 19 | 0 | 16 | 0 | https://snowtrace.io |
 
 ## Required operator path
@@ -71,17 +71,13 @@ The JSON report in `reports/status/contract_verification_publish_matrix.json` co
| 1 | Ethereum Mainnet | pmm_pool | cWCADC/USDC | `0xE0F35b5736FDd0a2F4B618621b0A08F8D8A3f92A` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWCHFC/USDC | `0x776Ca556deD3245984F504F4bef8Eeec55C50190` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWEURC/USDC | `0x0bC750F9c6DbDcd76B205695A356491b1B9ef098` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWEURT/USDC | `0x9cF3DeDAaC0984c530801b9b4881c8f99Bb329c3` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWGBPC/USDC | `0x5488042dF882893a3e7074453E2005CaDE4101b0` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWGBPT/USDC | `0xA42566bb730AD6D551Db32d50c0877132fc07c32` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWJPYC/USDC | `0x8A4187dF0A8FE855cC53A4F7B2D8346588Ee9794` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWUSDC/USDC | `0x69776fc607e9edA8042e320e7e43f54d06c68f0E` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWUSDC/USDT | `0xCC0fd27A40775c9AfcD2BBd3f7c902b0192c247A` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWUSDT/cWUSDC | `0xe944b7Cb012A0820c07f54D51e92f0e1C74168DB` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWUSDT/USDC | `0x27f3aE7EE71Be3d77bAf17d4435cF8B895DD25D2` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWUSDT/USDT | `0x79156F6B7bf71a1B72D78189B540A89A6C13F6FC` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWXAUC/USDC | `0xf6470219ce7749f8860dEABe9c347Ef2c1075E08` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool | cWXAUT/USDC | `0x1D51a38C924382287d770AbB61deb9C39ACa96E9` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool_volatile | cWUSDC/TRUU | `0x9A632F35078b6A4A9bf27806Bb7aFfAA2F16C846` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | pmm_pool_volatile | cWUSDT/TRUU | `0x508E5e80B66204b8CD9869323Fdd3A289ea50993` | etherscan | inventory-only | https://etherscan.io | pending | pending |
| 1 | Ethereum Mainnet | reference_venue | balancer:cWETH/USDC | `0xba11000000000000000000000000000000000001` | etherscan | inventory-only | https://etherscan.io | pending | pending |
@@ -103,18 +99,8 @@ The JSON report in `reports/status/contract_verification_publish_matrix.json` co
| 10 | Optimism | cw_token | cWXAUC | `0xddc4063f770f7c49d00b5a10fb552e922aa39b2c` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | cw_token | cWXAUT | `0x145e8e8c49b6a021969dd9d2c01c8fea44374f61` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | gas_mirror | cWETHL2 | `0x95007ec50d0766162f77848edf7bdc4eba147fb4` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWAUDC/USDC | `0x4B452800f6cD50326F14a6f089f4bB04e8079250` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWCADC/USDC | `0x19e1fdd037F1651AcEE11c5A5Aa246b85FA63f8e` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWCHFC/USDC | `0xA97D7dfB93CBf0C10243931d93FFEda745222ec6` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWEURC/USDC | `0x78C6aC6D7CbFcd85A3291D656F2154979a92c00B` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWEURT/USDC | `0x631DfC86A03cB05319d7165198f8099dacF78e56` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWGBPC/USDC | `0x79BE2b70A94E954d095f9F537FAf0741D15dfA31` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWGBPT/USDC | `0x5D6b5d7CA165c39c350083255774DdBf1c858e12` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWJPYC/USDC | `0x68C1c8a945ddCF3482b73aC09b6B5D4177D564AF` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWUSDC/USDC | `0x8F1038dE06d799a30D16d8B0b0ADEe629e7d4547` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWUSDT/USDT | `0xFCB0b0Ac36d67EDBA91100c75C27De945357CD62` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWXAUC/USDC | `0xCE25c324e41049D75abfB81c23257984A2A97a79` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | pmm_pool | cWXAUT/USDC | `0xb3Ee650019d7F756ce0F79b69614Fa2761871775` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | reference_venue | balancer:cWETHL2/USDC | `0xba2100000000000000000000000000000000000a` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | reference_venue | curve:cWETHL2/USDC | `0xc72100000000000000000000000000000000000a` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
| 10 | Optimism | reference_venue | uniswap_v3:cWETHL2/WETH | `0x712100000000000000000000000000000000000a` | etherscan-family | inventory-only | https://optimistic.etherscan.io | pending | pending |
@@ -127,6 +113,20 @@ The JSON report in `reports/status/contract_verification_publish_matrix.json` co
| 25 | Cronos | cw_token | cWEURC | `0x7574d37F42528B47c88962931e48FC61608a4050` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | cw_token | cWEURT | `0x9f833b4f1012F52eb3317b09922a79c6EdFca77D` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | cw_token | cWGBPC | `0xe5c65A76A541368d3061fe9E7A2140cABB903dbF` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | cw_token | cWGBPT | `0xBb58fa16bAc8E789f09C14243adEE6480D8213A2` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | cw_token | cWJPYC | `0x52aD62B8bD01154e2A4E067F8Dc4144C9988d203` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | cw_token | cWUSDC | `0x932566E5bB6BEBF6B035B94f3DE1f75f126304Ec` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | cw_token | cWUSDT | `0x72948a7a813B60b37Cd0c920C4657DbFF54312b8` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | cw_token | cWXAUC | `0xf1B771c95573113E993374c0c7cB2dc1a7908B12` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | cw_token | cWXAUT | `0xD517C0cF7013f988946A468c880Cc9F8e2A4BCbE` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | gas_mirror | cWCRO | `0x9b10eb0f77c45322dbd1fcb07176fd9a7609c164` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | pmm_pool | cWUSDC/USDC | `0x8F1038dE06d799a30D16d8B0b0ADEe629e7d4547` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | pmm_pool | cWUSDT/USDT | `0xFCB0b0Ac36d67EDBA91100c75C27De945357CD62` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 25 | Cronos | reference_venue | uniswap_v3:cWCRO/WCRO | `0x7161000000000000000000000000000000000019` | etherscan-family | inventory-only | https://cronoscan.com | pending | pending |
| 56 | BSC | anchor_token | USDT | `0x55d398326f99059fF775485246999027B3197955` | etherscan-family | reference-only | https://bscscan.com | pending | pending |
| 56 | BSC | cw_token | cWAUDC | `0x7062f35567BBAb4d98dc33af03B0d14Df42294D5` | etherscan-family | inventory-only | https://bscscan.com | pending | pending |
| 56 | BSC | cw_token | cWAUSDT | `0xe1a51Bc037a79AB36767561B147eb41780124934` | etherscan-family | inventory-only | https://bscscan.com | pending | pending |
| 56 | BSC | cw_token | cWBTC | `0xcb7c000000000000000000000000000000000038` | etherscan-family | inventory-only | https://bscscan.com | pending | pending |

## Notes
@@ -1,6 +1,6 @@
 # Explorer Token List Cross-Check
 
-**Last Updated:** 2026-04-14
+**Last Updated:** 2026-04-02
 **Purpose:** Cross-check the token list shown at [https://explorer.d-bis.org/tokens](https://explorer.d-bis.org/tokens) against repo token lists, canonical addresses, and CONTRACT_ADDRESSES_REFERENCE.
 
 ---
@@ -89,7 +89,6 @@ and use `dbis-138.tokenlist.json` as the curated source. See §9.
 | LINK (0xb7721d…) | Yes | Match |
 | cUSDT (0x93E6…) | Yes | Match |
 | cUSDC (0xf22258…) | Yes | Match |
-| cUSDT V2, cUSDC V2, mirror USDT/USDC | Yes | Match §5; list uses tag **`fwdcanon`** (Uniswap schema tag length); gas rows use **`gasnative`** |
 | cEURC (0x808596…) | Yes | Match |
 
 **Historical gap (closed 2026-02-28):**
@@ -97,7 +96,7 @@ cEURT, cGBPC, cGBPT, cAUDC, cJPYC, cCHFC, cCADC, cXAUC, cXAUT were initially mis
 
 ### 3.2 `explorer-monorepo/backend/config/metamask/DUAL_CHAIN_TOKEN_LIST.tokenlist.json`
 
-Chain 138 entries were originally the same 7 as `dbis-138`, but the additional compliant tokens were added on 2026-02-28. **cUSDT/cUSDC V2** and **official mirror USDT/USDC** (D3) are now part of the canonical set in **§5** and appear in `DUAL_CHAIN_TOKEN_LIST`; keep `dbis-138.tokenlist.json` aligned with §5 for packaging parity. Keep this file focused on live Explorer cross-checks rather than the closed token-list gap.
+Chain 138 entries were originally the same 7 as `dbis-138`, but the additional compliant tokens were added on 2026-02-28. Keep this file focused on live Explorer cross-checks rather than the closed token-list gap.
 
 ### 3.3 `ADDRESS_MATRIX_AND_STATUS.md` / `CONTRACT_ADDRESSES_REFERENCE.md`
@@ -111,9 +110,9 @@ All **canonical** Chain 138 token addresses (WETH, WETH10, LINK, cUSDT, cUSDC, c
 |------|--------|----------------|
 | **Explorer token list source** | Confirmed: Blockscout `GET /api/v2/tokens` | No change; document only. |
 | **WETH9 on Blockscout** | First token has `decimals: "0"`, `name`/`symbol`: null | Verify WETH9 contract metadata on-chain; fix in contract or in Blockscout indexing if needed. |
-| **dbis-138.tokenlist.json** | Done (2026-02-28; mirrors 2026-04-14) | Added cEURT, cGBPC, cGBPT, cAUDC, cJPYC, cCHFC, cCADC, cXAUC, cXAUT; **official mirror USDT/USDC** per §5. |
-| **DUAL_CHAIN_TOKEN_LIST (MetaMask)** | Done (2026-02-28) | Added same 9 tokens to backend and api/rest copies; **V2 + mirrors** tracked in §5 / DUAL. |
-| **ADDRESS_MATRIX / docs** | Done | cEURT and **§1.1 / §5** token inventory; TransactionMirror 0x7131…; summary updated. |
+| **dbis-138.tokenlist.json** | Done (2026-02-28) | Added cEURT, cGBPC, cGBPT, cAUDC, cJPYC, cCHFC, cCADC, cXAUC, cXAUT. |
+| **DUAL_CHAIN_TOKEN_LIST (MetaMask)** | Done (2026-02-28) | Added same 9 tokens to backend and api/rest copies. |
+| **ADDRESS_MATRIX / docs** | Done | cEURT and all 16 tokens in §1.1; TransactionMirror 0x7131…; summary updated. |
 | **Extra LINK/cUSDT/cUSDC on Blockscout** | 6 additional contracts | Non-canonical; use §5 canonical addresses only. |
 
 ---
@@ -129,10 +128,6 @@ Use this table to align token lists and docs with the Explorer (Blockscout) and
 | LINK | `0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03` | 18 |
 | cUSDT | `0x93E66202A11B1772E55407B32B44e5Cd8eda7f22` | 6 |
 | cUSDC | `0xf22258f57794CC8E06237084b353Ab30fFfa640b` | 6 |
-| cUSDT V2 | `0x9FBfab33882Efe0038DAa608185718b772EE5660` | 6 |
-| cUSDC V2 | `0x219522c60e83dEe01FC5b0329d6fA8fD84b9D13d` | 6 |
-| USDT (official mirror, D3) | `0x004b63A7B5b0E06f6bB6adb4a5F9f590BF3182D1` | 6 |
-| USDC (official mirror, D3) | `0x71D6687F38b93CCad569Fa6352c876eea967201b` | 6 |
 | cEURC | `0x8085961F9cF02b4d800A3c6d386D31da4B34266a` | 6 |
 | cEURT | `0xdf4b71c61E5912712C1Bdd451416B9aC26949d72` | 6 |
 | cGBPC | `0x003960f16D9d34F2e98d62723B6721Fb92074aD2` | 6 |
@@ -159,7 +154,7 @@ Use this table to align token lists and docs with the Explorer (Blockscout) and
 | `explorer-monorepo/backend/config/metamask/DUAL_CHAIN_TOKEN_LIST.tokenlist.json` | Multi-chain token list (138, 1, 651940, 25) for MetaMask |
 | `token-lists/lists/dbis-138.tokenlist.json` | Chain 138 curated token list (Uniswap-style) |
 | `smom-dbis-138/services/token-aggregation/src/config/canonical-tokens.ts` | Canonical addresses and env overrides for indexing/reporting |
-| `docs/11-references/ADDRESS_MATRIX_AND_STATUS.md` | Correlated address matrix; §1.1 aligned with **§5** canonical rows; TransactionMirror and summary updated |
+| `docs/11-references/ADDRESS_MATRIX_AND_STATUS.md` | Correlated address matrix; §1.1 includes all 16 tokens; TransactionMirror and summary updated |
 
 ---
@@ -76,7 +76,7 @@
 | **DODO Vending Machine / Adapter** | Deployed | `0xb6D9EF3575bc48De3f011C310DC24d87bEC6087C` — adapter used by `DODOPMMIntegration`. |
 | **PMM pools (current canonical stack)** | 3 created | Public pools are `0x9e89bAe009adf128782E19e8341996c596ac40dC` (cUSDT/cUSDC), `0x866Cb44b59303d8dc5f4F9E3E7A8e8b0bf238d66` (cUSDT/USDT), `0xc39B7D0F40838cbFb54649d327f49a6DAC964062` (cUSDC/USDC). |
 | **DODOPMMProvider** | **Deployed** | `0x3f729632E9553EBacCdE2e9b4c8F2B285b014F2e`; supports the three canonical stable pools above. |
-| **D3Oracle** | Deployed (private pilot) | `0xD7459aEa8bB53C83a1e90262777D730539A326F0`; `WETH10` uses keeper-synced **MockPriceFeed** `0x3e8725b8De386feF3eFE5678c92eA6aDB41992B2` (avoids managed-aggregator staleness on Besu). Legacy slot `0x99b3511a2d315a497c8112c1fdd8d508d4b1e506` remains for audits. Stables use managed USD feeds. |
+| **D3Oracle** | Deployed (private pilot) | `0xD7459aEa8bB53C83a1e90262777D730539A326F0`; `WETH10` now uses live `Oracle_Aggregator=0x99b3511a2d315a497c8112c1fdd8d508d4b1e506`, and the stable assets use dedicated managed USD feeds. |
 | **D3Vault / D3Proxy / D3MMFactory** | Deployed (private pilot) | `D3Vault=0x42b6867260Fb9eE6d09B7E0233A1fAD65D0133D1`, `D3Proxy=0xc9a11abB7C63d88546Be24D58a6d95e3762cB843`, `D3MMFactory=0x78470C7d2925B6738544E2DD4FE7c07CcA21AC31`. |
 | **D3MM pools** | 2 created | `0xE71Bc2cCb62dA5B18F88647db2b4a721Db416fc5` is a superseded bootstrap pool on the placeholder `WETH9` path. `0x6550A3a59070061a262a893A1D6F3F490afFDBDA` is the canonical private `WETH10` pilot pool. |
 | **EnhancedSwapRouterV2 + public venue layer** | Live | Router-v2 is deployed and the canonical upstream-native `Uniswap_v3` lane plus the funded pilot-compatible `Balancer`, `Curve_3`, and `1inch` venues are publicly exposed for the canonical Chain 138 routing asset lanes. |
@@ -17,25 +17,6 @@ import http from 'node:http';

const PORT = parseInt(process.env.PORT || '3080', 10);
const BLOCKSCOUT_URL = (process.env.BLOCKSCOUT_URL || 'http://192.168.11.140:4000').replace(/\/$/, '');

function parseOptimizationRuns(payload) {
  const raw = payload.runs ?? payload.optimization_runs ?? '200';
  return parseInt(raw, 10) || 200;
}

function inferOptimizationEnabled(payload) {
  const explicit = payload.optimizationUsed ?? payload.optimization_used;
  if (explicit !== undefined && explicit !== null && explicit !== '') {
    return [true, '1', 1, 'true'].includes(explicit);
  }
  // Forge often supplies runs without optimizationUsed for legacy compiler paths.
  // When runs is positive, assume optimization was intentionally enabled.
  return parseOptimizationRuns(payload) > 0;
}

function inferEvmVersion(payload) {
  return payload.evmversion || payload.evm_version || 'default';
}

/** Parse body as JSON or application/x-www-form-urlencoded (Forge/Etherscan style). */
function parseBody(req) {
  return new Promise((resolve, reject) => {

@@ -142,9 +123,9 @@ async function forwardV2Flattened(payload) {
    compiler_version: payload.compilerversion || payload.compilerVersion || 'v0.8.20+commit.a1b79de6',
    contract_name: payload.contractname || payload.contractName || 'Contract',
    license_type: payload.licensetype || payload.licenseType || 'mit',
    is_optimization_enabled: inferOptimizationEnabled(payload),
    optimization_runs: parseOptimizationRuns(payload),
    evm_version: inferEvmVersion(payload),
    is_optimization_enabled: [true, '1', 1, 'true'].includes(payload.optimizationUsed ?? payload.optimization_used),
    optimization_runs: parseInt(payload.runs ?? payload.optimization_runs ?? '200', 10) || 200,
    evm_version: payload.evmversion || payload.evm_version || 'london',
    autodetect_constructor_args: payload.autodetectConstructorArguments !== false,
    source_code: typeof sourceCode === 'string' ? sourceCode : JSON.stringify(sourceCode),
  };

@@ -225,9 +206,13 @@ async function forwardV2StandardInput(payload) {
  appendField('autodetect_constructor_args', String(payload.autodetectConstructorArguments !== false));
  appendField('license_type', licenseType);
  appendField('constructor_args', constructorArgs);
  appendField('evm_version', inferEvmVersion(payload));
  appendField('is_optimization_enabled', String(inferOptimizationEnabled(payload)));
  appendField('optimization_runs', String(parseOptimizationRuns(payload)));
  if (payload.evmversion || payload.evm_version) appendField('evm_version', payload.evmversion || payload.evm_version);
  if (payload.optimizationUsed !== undefined || payload.optimization_used !== undefined) {
    appendField('is_optimization_enabled', String([true, '1', 1, 'true'].includes(payload.optimizationUsed ?? payload.optimization_used)));
  }
  if (payload.runs !== undefined || payload.optimization_runs !== undefined) {
    appendField('optimization_runs', String(parseInt(payload.runs ?? payload.optimization_runs ?? '200', 10) || 200));
  }
  appendFile('files[0]', 'standard-input.json', standardJson, 'application/json');
  parts.push(Buffer.from(`--${boundary}--\r\n`));
  const body = Buffer.concat(parts);

@@ -269,15 +254,7 @@ function toEtherscanResponse(result) {
    return { status: '1', message: data.message || 'OK', result: data.result ?? 'Verification submitted' };
  }
  if (status >= 200 && status < 300) {
    const successMessage = typeof data?.message === 'string' ? data.message : '';
    const successResult = typeof data?.result === 'string' ? data.result : '';
    if (
      /verification started/i.test(successMessage) ||
      /verification submitted/i.test(successMessage) ||
      /verification submitted/i.test(successResult)
    ) {
      return { status: '1', message: successMessage || 'OK', result: successResult || 'Verification submitted' };
    }
    return { status: '1', message: 'OK', result: data?.result ?? 'Verification submitted' };
  }
  // Blockscout may return HTML (502/500) or invalid JSON when DB/migrations fail
  let msg = data?.message || data?.error;
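The inference helpers in the hunks above encode one rule: an explicit `optimizationUsed` flag always wins, and only when Forge omits it does a positive `runs` value imply optimization. A standalone sanity check of that rule (a sketch; the helpers are restated inline so it runs without the proxy):

```js
// Minimal re-statement of the proxy's inference helpers for a standalone check.
const parseOptimizationRuns = (p) => parseInt(p.runs ?? p.optimization_runs ?? '200', 10) || 200;
const inferOptimizationEnabled = (p) => {
  const explicit = p.optimizationUsed ?? p.optimization_used;
  if (explicit !== undefined && explicit !== null && explicit !== '') {
    return [true, '1', 1, 'true'].includes(explicit);
  }
  return parseOptimizationRuns(p) > 0;
};

// Explicit flag wins, even when runs is positive.
console.assert(inferOptimizationEnabled({ optimizationUsed: '0', runs: '200' }) === false);
// Legacy path: runs present, flag absent — treated as enabled.
console.assert(inferOptimizationEnabled({ runs: '200' }) === true);
// Nothing supplied at all: defaults to 200 runs.
console.assert(parseOptimizationRuns({}) === 200);
```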
247 phoenix-deploy-api/deploy-targets.json Normal file
@@ -0,0 +1,247 @@
{
  "defaults": { "timeout_sec": 1800 },
  "targets": [
    {
      "repo": "d-bis/proxmox",
      "branch": "main",
      "target": "default",
      "description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "phoenix-deploy-api/scripts/install-systemd.sh"],
      "required_env": ["PHOENIX_REPO_ROOT"],
      "healthcheck": { "url": "http://192.168.11.59:4001/health", "expect_status": 200, "expect_body_includes": "phoenix-deploy-api", "attempts": 8, "delay_ms": 3000, "timeout_ms": 10000 }
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "main",
      "target": "cloudflare-sync",
      "description": "Optional: sync Cloudflare DNS from repo .env (path-gated; set PHOENIX_CLOUDFLARE_SYNC=1 on host).",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/gitea-cloudflare-sync.sh"],
      "required_env": ["PHOENIX_REPO_ROOT"],
      "timeout_sec": 600
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "main",
      "target": "cloudflare-sync-force",
      "description": "Same as cloudflare-sync but skips the path filter (operator/manual use).",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/gitea-cloudflare-sync.sh"],
      "required_env": ["PHOENIX_REPO_ROOT"],
      "timeout_sec": 600
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "main",
      "target": "portal-live",
      "description": "Deploy the Sankofa portal to CT 7801 on Proxmox.",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/sync-sankofa-portal-7801.sh"],
      "required_env": ["PHOENIX_REPO_ROOT", "SANKOFA_PORTAL_SRC"],
      "healthcheck": { "url": "http://192.168.11.51:3000/", "expect_status": 200, "expect_body_includes": "<html", "attempts": 10, "delay_ms": 5000, "timeout_ms": 10000 }
    },
    {
      "repo": "d-bis/CurrenciCombo",
      "branch": "main",
      "target": "default",
      "description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"],
      "required_env": ["PHOENIX_REPO_ROOT", "PHOENIX_DEPLOY_WORKSPACE"],
      "healthcheck": { "url": "https://curucombo.xn--vov0g.com/api/ready", "expect_status": 200, "expect_body_includes": "\"ready\":true", "attempts": 12, "delay_ms": 5000, "timeout_ms": 15000 }
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "main",
      "target": "atomic-swap-dapp-live",
      "description": "Deploy the Atomic Swap dApp to VMID 5801 on Proxmox.",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/deploy-atomic-swap-dapp-5801.sh"],
      "required_env": ["PHOENIX_REPO_ROOT"],
      "healthcheck": { "url": "https://atomic-swap.defi-oracle.io/data/live-route-registry.json", "expect_status": 200, "expect_body_includes": "\"liveBridgeRoutes\"", "attempts": 10, "delay_ms": 5000, "timeout_ms": 15000 }
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "master",
      "target": "default",
      "description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "phoenix-deploy-api/scripts/install-systemd.sh"],
      "required_env": ["PHOENIX_REPO_ROOT"],
      "healthcheck": { "url": "http://192.168.11.59:4001/health", "expect_status": 200, "expect_body_includes": "phoenix-deploy-api", "attempts": 8, "delay_ms": 3000, "timeout_ms": 10000 }
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "master",
      "target": "atomic-swap-dapp-live",
      "description": "Deploy the Atomic Swap dApp to VMID 5801 on Proxmox.",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/deploy-atomic-swap-dapp-5801.sh"],
      "required_env": ["PHOENIX_REPO_ROOT"],
      "healthcheck": { "url": "https://atomic-swap.defi-oracle.io/data/live-route-registry.json", "expect_status": 200, "expect_body_includes": "\"liveBridgeRoutes\"", "attempts": 10, "delay_ms": 5000, "timeout_ms": 15000 }
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "master",
      "target": "cloudflare-sync",
      "description": "Optional: sync Cloudflare DNS from repo .env (path-gated; set PHOENIX_CLOUDFLARE_SYNC=1 on host).",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/gitea-cloudflare-sync.sh"],
      "required_env": ["PHOENIX_REPO_ROOT"],
      "timeout_sec": 600
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "master",
      "target": "cloudflare-sync-force",
      "description": "Same as cloudflare-sync but skips the path filter (operator/manual use).",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/gitea-cloudflare-sync.sh"],
      "required_env": ["PHOENIX_REPO_ROOT"],
      "timeout_sec": 600
    },
    {
      "repo": "d-bis/proxmox",
      "branch": "master",
      "target": "portal-live",
      "description": "Deploy the Sankofa portal to CT 7801 on Proxmox.",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/sync-sankofa-portal-7801.sh"],
      "required_env": ["PHOENIX_REPO_ROOT", "SANKOFA_PORTAL_SRC"],
      "healthcheck": { "url": "http://192.168.11.51:3000/", "expect_status": 200, "expect_body_includes": "<html", "attempts": 10, "delay_ms": 5000, "timeout_ms": 10000 }
    },
    {
      "repo": "d-bis/CurrenciCombo",
      "branch": "master",
      "target": "default",
      "description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
      "cwd": "${PHOENIX_REPO_ROOT}",
      "command": ["bash", "scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"],
      "required_env": ["PHOENIX_REPO_ROOT", "PHOENIX_DEPLOY_WORKSPACE"],
      "healthcheck": { "url": "https://curucombo.xn--vov0g.com/api/ready", "expect_status": 200, "expect_body_includes": "\"ready\":true", "attempts": 12, "delay_ms": 5000, "timeout_ms": 15000 }
    }
  ]
}
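Because `main` and `master` are both deploy-triggering branches, every `repo`+`target` pair above is intentionally duplicated across the two branches. A small sketch that flags drift in this file (reads the bundled JSON directly; path is relative to the repo root):

```js
import { readFileSync } from 'node:fs';

const { targets } = JSON.parse(readFileSync('phoenix-deploy-api/deploy-targets.json', 'utf8'));

// Group branches by repo#target and warn when a pair lacks its branch twin.
const byKey = new Map();
for (const t of targets) {
  const key = `${t.repo}#${t.target || 'default'}`;
  byKey.set(key, (byKey.get(key) || new Set()).add(t.branch || 'main'));
}
for (const [key, branches] of byKey) {
  if (!(branches.has('main') && branches.has('master'))) {
    console.warn(`target missing a branch twin: ${key} -> ${[...branches].join(', ')}`);
  }
}
```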
@@ -25,7 +25,70 @@ if [[ -f "$REPO_ROOT/config/public-sector-program-manifest.json" ]]; then
else
  echo "WARN: $REPO_ROOT/config/public-sector-program-manifest.json missing — set PUBLIC_SECTOR_MANIFEST_PATH in .env"
fi
[ -f "$APP_DIR/.env" ] && cp "$APP_DIR/.env" "$TARGET/.env" || [ -f "$APP_DIR/.env.example" ] && cp "$APP_DIR/.env.example" "$TARGET/.env" || true
if [[ -f "$TARGET/.env" ]]; then
  echo "Preserving existing $TARGET/.env"
elif [[ -f "$APP_DIR/.env" ]]; then
  cp "$APP_DIR/.env" "$TARGET/.env"
elif [[ -f "$APP_DIR/.env.example" ]]; then
  cp "$APP_DIR/.env.example" "$TARGET/.env"
fi

ensure_env_value() {
  local key="$1"
  local value="$2"
  local file="$TARGET/.env"
  [[ -n "$value" && -f "$file" ]] || return 0

  local current=""
  if grep -qE "^${key}=" "$file"; then
    current="$(grep -E "^${key}=" "$file" | tail -n 1 | cut -d= -f2-)"
  fi
  [[ -z "$current" ]] || return 0

  local tmp
  tmp="$(mktemp)"
  awk -v key="$key" -v value="$value" '
    BEGIN { found = 0 }
    $0 ~ "^" key "=" {
      print key "=" value
      found = 1
      next
    }
    { print }
    END {
      if (!found) print key "=" value
    }
  ' "$file" > "$tmp"
  cat "$tmp" > "$file"
  rm -f "$tmp"
}

repo_env_value() {
  local key="$1"
  local file="$REPO_ROOT/.env"
  [[ -f "$file" ]] || return 0
  grep -E "^${key}=" "$file" | tail -n 1 | cut -d= -f2-
}

if [[ -f "$TARGET/.env" ]]; then
  ensure_env_value PHOENIX_REPO_ROOT "$REPO_ROOT"
  for key in \
    GITEA_TOKEN \
    PHOENIX_DEPLOY_SECRET \
    PROXMOX_HOST \
    PROXMOX_PORT \
    PROXMOX_USER \
    PROXMOX_TOKEN_NAME \
    PROXMOX_TOKEN_VALUE \
    PROXMOX_TLS_VERIFY \
    PUBLIC_IP \
    CLOUDFLARE_API_TOKEN \
    CLOUDFLARE_GITEA_SYNC_ZONE \
    PHOENIX_CLOUDFLARE_SYNC
  do
    ensure_env_value "$key" "$(repo_env_value "$key")"
  done
fi
chown -R root:root "$TARGET"
cd "$TARGET" && npm install --omit=dev
cp "$APP_DIR/phoenix-deploy-api.service" /etc/systemd/system/
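The key property of `ensure_env_value` is idempotence: a key is filled only when it is missing or empty, so operator-edited values survive reinstalls. A hypothetical Node equivalent (useful for unit-testing the merge rule outside the CT; it checks only the last occurrence of a key, a simplification of the awk rewrite above):

```js
import { readFileSync, writeFileSync } from 'node:fs';

// Fill `key` with `value` only when its last occurrence is missing or empty;
// otherwise leave the file untouched (mirrors ensure_env_value's contract).
export function ensureEnvValue(file, key, value) {
  if (!value) return;
  const lines = readFileSync(file, 'utf8').split('\n');
  const idx = lines
    .map((l, i) => (l.startsWith(`${key}=`) ? i : -1))
    .filter((i) => i >= 0)
    .pop();
  if (idx !== undefined && lines[idx].slice(key.length + 1) !== '') return; // already set
  if (idx !== undefined) lines[idx] = `${key}=${value}`;
  else lines.push(`${key}=${value}`);
  writeFileSync(file, lines.join('\n'));
}
```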
@@ -1,6 +1,6 @@
#!/usr/bin/env node
/**
 * Phoenix Deploy API — Gitea webhook receiver, deploy stub, and Phoenix API Railing (Infra/VE)
 * Phoenix Deploy API — Gitea webhook receiver, deploy execution API, and Phoenix API Railing (Infra/VE)
 *
 * Endpoints:
 *   POST /webhook/gitea — Receives Gitea push/tag/PR webhooks

@@ -19,7 +19,9 @@
import crypto from 'crypto';
import https from 'https';
import path from 'path';
import { readFileSync, existsSync } from 'fs';
import { promisify } from 'util';
import { execFile as execFileCallback } from 'child_process';
import { cpSync, existsSync, mkdirSync, mkdtempSync, readFileSync, readdirSync, rmSync, writeFileSync } from 'fs';
import { fileURLToPath } from 'url';
import express from 'express';

@@ -29,6 +31,13 @@ const PORT = parseInt(process.env.PORT || '4001', 10);
const GITEA_URL = (process.env.GITEA_URL || 'https://gitea.d-bis.org').replace(/\/$/, '');
const GITEA_TOKEN = process.env.GITEA_TOKEN || '';
const WEBHOOK_SECRET = process.env.PHOENIX_DEPLOY_SECRET || '';
const PHOENIX_REPO_ROOT_DEFAULT = (process.env.PHOENIX_REPO_ROOT_DEFAULT || '/srv/projects/proxmox').trim();
const ATOMIC_SWAP_REPO = (process.env.PHOENIX_ATOMIC_SWAP_REPO || 'd-bis/atomic-swap-dapp').trim();
const ATOMIC_SWAP_REF = (process.env.PHOENIX_ATOMIC_SWAP_REF || 'main').trim();
const CROSS_CHAIN_PMM_LPS_REPO = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REPO || '').trim();
const CROSS_CHAIN_PMM_LPS_REF = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REF || 'main').trim();
const SMOM_DBIS_138_REPO = (process.env.PHOENIX_SMOM_DBIS_138_REPO || '').trim();
const SMOM_DBIS_138_REF = (process.env.PHOENIX_SMOM_DBIS_138_REF || 'main').trim();

const PROXMOX_HOST = process.env.PROXMOX_HOST || '';
const PROXMOX_PORT = parseInt(process.env.PROXMOX_PORT || '8006', 10);

@@ -42,6 +51,17 @@ const PROMETHEUS_URL = (process.env.PROMETHEUS_URL || 'http://localhost:9090').r
const PHOENIX_WEBHOOK_URL = process.env.PHOENIX_WEBHOOK_URL || '';
const PHOENIX_WEBHOOK_SECRET = process.env.PHOENIX_WEBHOOK_SECRET || '';
const PARTNER_KEYS = (process.env.PHOENIX_PARTNER_KEYS || '').split(',').map((k) => k.trim()).filter(Boolean);
const WEBHOOK_DEPLOY_ENABLED = process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === '1' || process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === 'true';
const execFile = promisify(execFileCallback);

function expandEnvTokens(value, env = process.env) {
  if (typeof value !== 'string') return value;
  return value.replace(/\$\{([A-Z0-9_]+)\}/gi, (_, key) => env[key] || '');
}

function resolvePhoenixRepoRoot() {
  return (process.env.PHOENIX_REPO_ROOT || PHOENIX_REPO_ROOT_DEFAULT || '').trim().replace(/\/$/, '');
}

/**
 * Manifest resolution order:

@@ -63,15 +83,395 @@ function resolvePublicSectorManifestPath() {
  return path.join(__dirname, '..', 'config', 'public-sector-program-manifest.json');
}

function resolveDeployTargetsPath() {
  const override = (process.env.DEPLOY_TARGETS_PATH || '').trim();
  if (override && existsSync(override)) return override;
  // Fall back to the bundled copy; loadDeployTargetsConfig handles a missing file.
  return path.join(__dirname, 'deploy-targets.json');
}

function loadDeployTargetsConfig() {
  const configPath = resolveDeployTargetsPath();
  if (!existsSync(configPath)) {
    return {
      path: configPath,
      defaults: {},
      targets: [],
    };
  }
  const raw = readFileSync(configPath, 'utf8');
  const parsed = JSON.parse(raw);
  return {
    path: configPath,
    defaults: parsed.defaults || {},
    targets: Array.isArray(parsed.targets) ? parsed.targets : [],
  };
}

function findDeployTarget(repo, branch, requestedTarget) {
  const config = loadDeployTargetsConfig();
  const wantedTarget = requestedTarget || 'default';
  const match = config.targets.find((entry) => {
    if (entry.repo !== repo) return false;
    if ((entry.branch || 'main') !== branch) return false;
    return (entry.target || 'default') === wantedTarget;
  });
  return { config, match, wantedTarget };
}

async function sleep(ms) {
  await new Promise((resolve) => setTimeout(resolve, ms));
}

async function verifyHealthCheck(healthcheck) {
  if (!healthcheck || !healthcheck.url) return null;

  const attempts = Math.max(1, Number(healthcheck.attempts || 1));
  const delayMs = Math.max(0, Number(healthcheck.delay_ms || 0));
  const timeoutMs = Math.max(1000, Number(healthcheck.timeout_ms || 10000));
  const expectedStatus = Number(healthcheck.expect_status || 200);
  const expectBodyIncludes = healthcheck.expect_body_includes || '';

  let lastError = null;
  for (let attempt = 1; attempt <= attempts; attempt += 1) {
    try {
      const controller = new AbortController();
      const timeout = setTimeout(() => controller.abort(), timeoutMs);
      const res = await fetch(healthcheck.url, { signal: controller.signal });
      const body = await res.text();
      clearTimeout(timeout);

      if (res.status !== expectedStatus) {
        throw new Error(`Expected HTTP ${expectedStatus}, got ${res.status}`);
      }
      if (expectBodyIncludes && !body.includes(expectBodyIncludes)) {
        throw new Error(`Health body missing expected text: ${expectBodyIncludes}`);
      }

      return {
        ok: true,
        url: healthcheck.url,
        status: res.status,
        attempt,
      };
    } catch (err) {
      lastError = err;
      if (attempt < attempts && delayMs > 0) {
        await sleep(delayMs);
      }
    }
  }

  throw new Error(`Health check failed for ${healthcheck.url}: ${lastError?.message || 'unknown error'}`);
}
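Usage is a single `await` with one of the JSON `healthcheck` blocks from `deploy-targets.json`; the portal entry above would be exercised like this (a sketch — the function throws on failure rather than returning):

```js
// Polls up to 10 times, 5 s apart, expecting HTTP 200 with "<html" in the body.
const result = await verifyHealthCheck({
  url: 'http://192.168.11.51:3000/',
  expect_status: 200,
  expect_body_includes: '<html',
  attempts: 10,
  delay_ms: 5000,
  timeout_ms: 10000,
});
console.log(`healthy on attempt ${result.attempt}`);
```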
async function downloadRepoArchive({ owner, repo, ref, archivePath, authToken }) {
  const archiveRef = `${ref}.tar.gz`;
  const url = `${GITEA_URL}/api/v1/repos/${owner}/${repo}/archive/${archiveRef}`;
  const headers = {};
  if (authToken) headers.Authorization = `token ${authToken}`;
  const res = await fetch(url, { headers });
  if (!res.ok) {
    throw new Error(`Failed to download archive ${owner}/${repo}@${ref}: HTTP ${res.status}`);
  }
  const buffer = Buffer.from(await res.arrayBuffer());
  writeFileSync(archivePath, buffer);
}

function syncExtractedTree({ sourceRoot, destRoot, entries = null }) {
  mkdirSync(destRoot, { recursive: true });
  const selectedEntries = Array.isArray(entries) ? entries : readdirSync(sourceRoot);
  for (const entry of selectedEntries) {
    const sourcePath = path.join(sourceRoot, entry);
    if (!existsSync(sourcePath)) continue;
    const destPath = path.join(destRoot, entry);
    rmSync(destPath, { recursive: true, force: true });
    cpSync(sourcePath, destPath, { recursive: true });
  }
}

async function syncRepoArchive({ owner, repo, ref, destRoot, entries = null, authToken = '' }) {
  const tempDir = mkdtempSync('/tmp/phoenix-archive-');
  const archivePath = path.join(tempDir, 'repo.tar.gz');
  const extractDir = path.join(tempDir, 'extract');
  mkdirSync(extractDir, { recursive: true });
  try {
    await downloadRepoArchive({ owner, repo, ref, archivePath, authToken });
    await execFile('tar', ['-xzf', archivePath, '-C', extractDir]);
    const [rootDir] = readdirSync(extractDir);
    if (!rootDir) {
      throw new Error(`Archive for ${owner}/${repo}@${ref} was empty`);
    }
    syncExtractedTree({
      sourceRoot: path.join(extractDir, rootDir),
      destRoot,
      entries,
    });
  } finally {
    rmSync(tempDir, { recursive: true, force: true });
  }
}
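The archive path leans on Gitea's repo-archive endpoint (`GET /api/v1/repos/{owner}/{repo}/archive/{ref}.tar.gz`), so a worker only needs a read-capable token — no git binary or SSH key on the deploy host. A standalone fetch sketch (token and output path are illustrative):

```js
import { writeFileSync } from 'node:fs';

const GITEA_URL = 'https://gitea.d-bis.org';
const token = process.env.GITEA_TOKEN || ''; // a read-only token is sufficient

const res = await fetch(`${GITEA_URL}/api/v1/repos/d-bis/proxmox/archive/main.tar.gz`, {
  headers: token ? { Authorization: `token ${token}` } : {},
});
if (!res.ok) throw new Error(`archive fetch failed: HTTP ${res.status}`);
writeFileSync('/tmp/proxmox-main.tar.gz', Buffer.from(await res.arrayBuffer()));
```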
async function prepareDeployWorkspace({ repo, branch, sha, target }) {
  const repoRoot = resolvePhoenixRepoRoot();
  if (!repoRoot) {
    throw new Error('PHOENIX_REPO_ROOT is not configured');
  }

  const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
  const externalWorkspaceRoot = path.join(repoRoot, '.phoenix-deploy-workspaces', owner, repoName);

  // Manual smoke tests can target the already-staged local workspace without
  // forcing an archive sync from Gitea.
  if (sha === 'HEAD' || sha === 'local') {
    mkdirSync(repoRoot, { recursive: true });
    if (repo !== 'd-bis/proxmox') {
      mkdirSync(externalWorkspaceRoot, { recursive: true });
    }
    return {
      PHOENIX_REPO_ROOT: repoRoot,
      PROXMOX_REPO_ROOT: repoRoot,
      PHOENIX_DEPLOY_WORKSPACE: repo === 'd-bis/proxmox' ? repoRoot : externalWorkspaceRoot,
    };
  }

  const ref = sha || branch || 'main';

  if (repo === 'd-bis/proxmox') {
    await syncRepoArchive({
      owner,
      repo: repoName,
      ref,
      destRoot: repoRoot,
      entries: ['config', 'phoenix-deploy-api', 'reports', 'scripts', 'token-lists'],
      authToken: GITEA_TOKEN,
    });
  } else {
    await syncRepoArchive({
      owner,
      repo: repoName,
      ref,
      destRoot: externalWorkspaceRoot,
      authToken: GITEA_TOKEN,
    });
  }

  if (repo === 'd-bis/proxmox' && target === 'atomic-swap-dapp-live') {
    const [swapOwner, swapRepo] = ATOMIC_SWAP_REPO.includes('/')
      ? ATOMIC_SWAP_REPO.split('/')
      : ['d-bis', ATOMIC_SWAP_REPO];
    await syncRepoArchive({
      owner: swapOwner,
      repo: swapRepo,
      ref: ATOMIC_SWAP_REF,
      destRoot: path.join(repoRoot, 'atomic-swap-dapp'),
      authToken: GITEA_TOKEN,
    });

    if (CROSS_CHAIN_PMM_LPS_REPO) {
      const [lpsOwner, lpsRepo] = CROSS_CHAIN_PMM_LPS_REPO.includes('/')
        ? CROSS_CHAIN_PMM_LPS_REPO.split('/')
        : ['d-bis', CROSS_CHAIN_PMM_LPS_REPO];
      await syncRepoArchive({
        owner: lpsOwner,
        repo: lpsRepo,
        ref: CROSS_CHAIN_PMM_LPS_REF,
        destRoot: path.join(repoRoot, 'cross-chain-pmm-lps'),
        authToken: GITEA_TOKEN,
      });
    }

    if (SMOM_DBIS_138_REPO) {
      const [smomOwner, smomRepo] = SMOM_DBIS_138_REPO.includes('/')
        ? SMOM_DBIS_138_REPO.split('/')
        : ['d-bis', SMOM_DBIS_138_REPO];
      await syncRepoArchive({
        owner: smomOwner,
        repo: smomRepo,
        ref: SMOM_DBIS_138_REF,
        destRoot: path.join(repoRoot, 'smom-dbis-138'),
        authToken: GITEA_TOKEN,
      });
    }
  }

  return {
    PHOENIX_REPO_ROOT: repoRoot,
    PROXMOX_REPO_ROOT: repoRoot,
    PHOENIX_DEPLOY_WORKSPACE: repo === 'd-bis/proxmox' ? repoRoot : externalWorkspaceRoot,
  };
}

async function runDeployTarget(definition, configDefaults, context, envOverrides = {}) {
  if (!Array.isArray(definition.command) || definition.command.length === 0) {
    throw new Error('Deploy target is missing a command array');
  }

  const childEnv = {
    ...process.env,
    ...envOverrides,
    PHOENIX_DEPLOY_REPO: context.repo,
    PHOENIX_DEPLOY_BRANCH: context.branch,
    PHOENIX_DEPLOY_SHA: context.sha || '',
    PHOENIX_DEPLOY_TARGET: context.target,
    PHOENIX_DEPLOY_TRIGGER: context.trigger,
  };

  const cwd = expandEnvTokens(definition.cwd || configDefaults.cwd || process.cwd(), childEnv);
  const timeoutSeconds = Number(definition.timeout_sec || configDefaults.timeout_sec || 1800);
  const timeout = Number.isFinite(timeoutSeconds) && timeoutSeconds > 0 ? timeoutSeconds * 1000 : 1800 * 1000;
  const command = definition.command.map((part) => expandEnvTokens(part, childEnv));
  const missingEnv = (definition.required_env || []).filter((key) => !childEnv[key]);
  if (missingEnv.length > 0) {
    throw new Error(`Missing required env for deploy target: ${missingEnv.join(', ')}`);
  }
  if (!existsSync(cwd)) {
    throw new Error(`Deploy working directory does not exist: ${cwd}`);
  }

  const { stdout, stderr } = await execFile(command[0], command.slice(1), {
    cwd,
    env: childEnv,
    timeout,
    maxBuffer: 10 * 1024 * 1024,
  });

  const healthcheck = await verifyHealthCheck(definition.healthcheck || configDefaults.healthcheck || null);

  return {
    cwd,
    command,
    stdout: stdout || '',
    stderr: stderr || '',
    timeout_sec: timeoutSeconds,
    healthcheck,
  };
}
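`runDeployTarget` expands `${VAR}` tokens in `cwd` and each `command` element against the merged child env, which is what lets `deploy-targets.json` stay host-agnostic. A quick demonstration of the expansion semantics (a sketch using the `expandEnvTokens` helper defined above):

```js
expandEnvTokens('${PHOENIX_REPO_ROOT}/scripts', { PHOENIX_REPO_ROOT: '/srv/projects/proxmox' });
// -> '/srv/projects/proxmox/scripts'

expandEnvTokens('${MISSING_VAR}/x', {});
// -> '/x' — unknown tokens collapse to '', which is why required_env is
// validated before the command runs.
```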
async function executeDeploy({ repo, branch = 'main', target = 'default', sha = '', trigger = 'api' }) {
  if (!repo) {
    const error = new Error('repo required');
    error.statusCode = 400;
    error.payload = { error: error.message };
    throw error;
  }

  const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
  const commitSha = sha || '';
  const requestedTarget = target || 'default';
  const { config, match, wantedTarget } = findDeployTarget(repo, branch, requestedTarget);

  if (!match) {
    const error = new Error('Deploy target not configured');
    error.statusCode = 404;
    error.payload = {
      error: error.message,
      repo,
      branch,
      target: wantedTarget,
      config_path: config.path,
    };
    if (commitSha && GITEA_TOKEN) {
      await setGiteaCommitStatus(owner, repoName, commitSha, 'failure', `No deploy target for ${repo} ${branch} ${wantedTarget}`);
    }
    throw error;
  }

  if (commitSha && GITEA_TOKEN) {
    await setGiteaCommitStatus(owner, repoName, commitSha, 'pending', 'Phoenix deployment in progress');
  }

  console.log(`[deploy] ${repo} branch=${branch} target=${wantedTarget} sha=${commitSha} trigger=${trigger}`);

  let deployResult = null;
  let deployError = null;
  let envOverrides = {};

  try {
    envOverrides = await prepareDeployWorkspace({
      repo,
      branch,
      sha: commitSha,
      target: wantedTarget,
    });
    deployResult = await runDeployTarget(match, config.defaults, {
      repo,
      branch,
      sha: commitSha,
      target: wantedTarget,
      trigger,
    }, envOverrides);
    if (commitSha && GITEA_TOKEN) {
      await setGiteaCommitStatus(owner, repoName, commitSha, 'success', `Deployed to ${wantedTarget}`);
    }
    return {
      status: 'completed',
      repo,
      branch,
      target: wantedTarget,
      config_path: config.path,
      command: deployResult.command,
      cwd: deployResult.cwd,
      stdout: deployResult.stdout,
      stderr: deployResult.stderr,
      healthcheck: deployResult.healthcheck,
    };
  } catch (err) {
    deployError = err;
    if (commitSha && GITEA_TOKEN) {
      await setGiteaCommitStatus(owner, repoName, commitSha, 'failure', `Deploy failed: ${err.message.slice(0, 120)}`);
    }
    err.statusCode = err.statusCode || 500;
    err.payload = err.payload || {
      error: err.message,
      repo,
      branch,
      target: wantedTarget,
      config_path: config.path,
    };
    throw err;
  } finally {
    if (PHOENIX_WEBHOOK_URL) {
      const payload = {
        event: 'deploy.completed',
        repo,
        branch,
        target: wantedTarget,
        sha: commitSha,
        success: Boolean(deployResult),
        command: deployResult?.command,
        cwd: deployResult?.cwd,
        phoenix_repo_root: envOverrides.PHOENIX_REPO_ROOT || null,
        error: deployError?.message || null,
      };
      const body = JSON.stringify(payload);
      const sig = crypto.createHmac('sha256', PHOENIX_WEBHOOK_SECRET || '').update(body).digest('hex');
      fetch(PHOENIX_WEBHOOK_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json', 'X-Phoenix-Signature': `sha256=${sig}` },
        body,
      }).catch((e) => console.error('[webhook] outbound failed', e.message));
    }
  }
}
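The outbound `deploy.completed` webhook is signed with HMAC-SHA256 over the raw JSON body and delivered as `X-Phoenix-Signature: sha256=<hex>`. A receiving service can verify it like this (a sketch; the constant-time compare matters here):

```js
import crypto from 'node:crypto';

// Returns true when `header` matches the HMAC of the raw request body.
function verifyPhoenixSignature(rawBody, header, secret) {
  const expected = `sha256=${crypto.createHmac('sha256', secret).update(rawBody).digest('hex')}`;
  const a = Buffer.from(header || '');
  const b = Buffer.from(expected);
  return a.length === b.length && crypto.timingSafeEqual(a, b);
}
```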
const httpsAgent = new https.Agent({ rejectUnauthorized: process.env.PROXMOX_TLS_VERIFY !== '0' });

function formatProxmoxAuthHeader(user, tokenName, tokenValue) {
  if (tokenName.includes('!')) {
    return `PVEAPIToken=${tokenName}=${tokenValue}`;
  }
  return `PVEAPIToken=${user}!${tokenName}=${tokenValue}`;
}
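`formatProxmoxAuthHeader` accepts either a bare token id or the full `user@realm!tokenid` form, so both env styles yield the same header (illustrative values):

```js
formatProxmoxAuthHeader('root@pam', 'deploy', 'SECRET');
// -> 'PVEAPIToken=root@pam!deploy=SECRET'

formatProxmoxAuthHeader('root@pam', 'root@pam!deploy', 'SECRET');
// -> 'PVEAPIToken=root@pam!deploy=SECRET' (name already carries user!tokenid)
```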
async function proxmoxRequest(endpoint, method = 'GET', body = null) {
  const baseUrl = `https://${PROXMOX_HOST}:${PROXMOX_PORT}/api2/json`;
  const url = `${baseUrl}${endpoint}`;
  const options = {
    method,
    headers: {
      Authorization: `PVEAPIToken=${PROXMOX_USER}!${PROXMOX_TOKEN_NAME}=${PROXMOX_TOKEN_VALUE}`,
      Authorization: formatProxmoxAuthHeader(PROXMOX_USER, PROXMOX_TOKEN_NAME, PROXMOX_TOKEN_VALUE),
      'Content-Type': 'application/json',
    },
    agent: httpsAgent,

@@ -162,12 +562,44 @@ app.post('/webhook/gitea', async (req, res) => {

  if (action === 'push' || (action === 'synchronize' && payload.pull_request)) {
    if (branch === 'main' || branch === 'master' || ref.startsWith('refs/tags/')) {
      if (sha && GITEA_TOKEN) {
        await setGiteaCommitStatus(owner, repoName, sha, 'pending', 'Phoenix deployment triggered');
      if (!WEBHOOK_DEPLOY_ENABLED) {
        return res.status(200).json({
          received: true,
          repo: fullName,
          branch,
          sha,
          deployed: false,
          message: 'Webhook accepted; set PHOENIX_WEBHOOK_DEPLOY_ENABLED=1 to execute deploys from webhook events.',
        });
      }

      try {
        const result = await executeDeploy({
          repo: fullName,
          branch,
          sha,
          target: 'default',
          trigger: 'webhook',
        });
        return res.status(200).json({
          received: true,
          repo: fullName,
          branch,
          sha,
          deployed: true,
          result,
        });
      } catch (err) {
        return res.status(200).json({
          received: true,
          repo: fullName,
          branch,
          sha,
          deployed: false,
          error: err.message,
          details: err.payload || null,
        });
      }
      // Stub: enqueue deploy; actual implementation would call Proxmox/deploy logic
      console.log(`[deploy-stub] Would deploy ${fullName} branch=${branch} sha=${sha}`);
      // Stub: when full deploy runs, call setGiteaCommitStatus(owner, repoName, sha, 'success'|'failure', ...)
    }
  }
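With `PHOENIX_WEBHOOK_DEPLOY_ENABLED` unset the endpoint only acknowledges; flipping it to `1` makes the same push event execute a deploy. A local test sketch follows. Gitea signs the raw body with HMAC-SHA256 in `X-Gitea-Signature`; the payload below is trimmed to a plausible minimum and the handler may read additional fields, so treat the field mix as an assumption:

```js
import crypto from 'node:crypto';

const body = JSON.stringify({
  ref: 'refs/heads/main',
  after: '0'.repeat(40), // placeholder commit sha
  repository: { full_name: 'd-bis/proxmox' },
});
const sig = crypto.createHmac('sha256', process.env.PHOENIX_DEPLOY_SECRET || '').update(body).digest('hex');

const res = await fetch('http://192.168.11.59:4001/webhook/gitea', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', 'X-Gitea-Event': 'push', 'X-Gitea-Signature': sig },
  body,
});
console.log(res.status, await res.json());
```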
@@ -185,47 +617,36 @@ app.post('/api/deploy', async (req, res) => {
  }

  const { repo, branch = 'main', target, sha } = req.body;
  if (!repo) {
    return res.status(400).json({ error: 'repo required' });
  try {
    const result = await executeDeploy({
      repo,
      branch,
      sha,
      target,
      trigger: 'api',
    });
    res.status(200).json(result);
  } catch (err) {
    res.status(err.statusCode || 500).json(err.payload || { error: err.message });
  }
});

  const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
  const commitSha = sha || '';

  if (commitSha && GITEA_TOKEN) {
    await setGiteaCommitStatus(owner, repoName, commitSha, 'pending', 'Phoenix deployment in progress');
  }

  console.log(`[deploy] ${repo} branch=${branch} target=${target || 'default'} sha=${commitSha}`);
  // Stub: no real deploy yet — report success so Gitea shows green; replace with real deploy + setGiteaCommitStatus on completion
  const deploySuccess = true;
  if (commitSha && GITEA_TOKEN) {
    await setGiteaCommitStatus(
      owner,
      repoName,
      commitSha,
      deploySuccess ? 'success' : 'failure',
      deploySuccess ? 'Deploy accepted (stub)' : 'Deploy failed (stub)'
    );
  }
  res.status(202).json({
    status: 'accepted',
    repo,
    branch,
    target: target || 'default',
    message: 'Deploy request queued (stub). Implement full deploy logic in Sankofa Phoenix API.',
app.get('/api/deploy-targets', (req, res) => {
  const config = loadDeployTargetsConfig();
  const targets = config.targets.map((entry) => ({
    repo: entry.repo,
    branch: entry.branch || 'main',
    target: entry.target || 'default',
    description: entry.description || '',
    cwd: entry.cwd || config.defaults.cwd || '',
    command: entry.command || [],
    has_healthcheck: Boolean(entry.healthcheck || config.defaults.healthcheck),
  }));
  res.json({
    config_path: config.path,
    count: targets.length,
    targets,
  });

  if (PHOENIX_WEBHOOK_URL) {
    const payload = { event: 'deploy.completed', repo, branch, target: target || 'default', sha: commitSha, success: deploySuccess };
    const body = JSON.stringify(payload);
    const sig = crypto.createHmac('sha256', PHOENIX_WEBHOOK_SECRET || '').update(body).digest('hex');
    fetch(PHOENIX_WEBHOOK_URL, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', 'X-Phoenix-Signature': `sha256=${sig}` },
      body,
    }).catch((e) => console.error('[webhook] outbound failed', e.message));
  }
});
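End to end, a manual deploy is now one authenticated POST, and the response carries the executed command, captured output, and healthcheck result instead of the old `202 accepted` stub. A sketch (the auth header shape is an assumption — match whatever guard the endpoint already enforces; `sha: 'HEAD'` is the local-workspace shortcut documented in `prepareDeployWorkspace`):

```js
const res = await fetch('http://192.168.11.59:4001/api/deploy', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    // Assumed auth shape; align with the endpoint's existing guard.
    Authorization: `Bearer ${process.env.PHOENIX_DEPLOY_TOKEN || ''}`,
  },
  body: JSON.stringify({ repo: 'd-bis/proxmox', branch: 'main', target: 'portal-live', sha: 'HEAD' }),
});
const out = await res.json();
console.log(out.status, out.healthcheck); // 'completed' plus the healthcheck summary on success
```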
/**

@@ -474,7 +895,10 @@ app.listen(PORT, () => {
  if (!GITEA_TOKEN) console.warn('GITEA_TOKEN not set — commit status updates disabled');
  if (!hasProxmox) console.warn('PROXMOX_* not set — Infra/VE API returns stub data');
  if (PHOENIX_WEBHOOK_URL) console.log('Outbound webhook enabled:', PHOENIX_WEBHOOK_URL);
  if (WEBHOOK_DEPLOY_ENABLED) console.log('Inbound webhook deploy execution enabled');
  if (PARTNER_KEYS.length > 0) console.log('Partner API key auth enabled for /api/v1/* (except GET /api/v1/public-sector/programs)');
  const mpath = resolvePublicSectorManifestPath();
  const dpath = resolveDeployTargetsPath();
  console.log(`Public-sector manifest: ${mpath} (${existsSync(mpath) ? 'ok' : 'missing'})`);
  console.log(`Deploy targets: ${dpath} (${existsSync(dpath) ? 'ok' : 'missing'})`);
});

File diff suppressed because it is too large

152 scripts/deployment/deploy-atomic-swap-dapp-5801.sh Normal file
@@ -0,0 +1,152 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SUBMODULE_ROOT="$PROJECT_ROOT/atomic-swap-dapp"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_DAPP_HOST:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
VMID="${VMID:-5801}"
DEPLOY_ROOT="${DEPLOY_ROOT:-/var/www/atomic-swap}"
TMP_ARCHIVE="/tmp/atomic-swap-dapp-5801.tgz"
DIST_DIR="$SUBMODULE_ROOT/dist"
SKIP_BUILD="${SKIP_BUILD:-0}"
SSH_OPTS="${SSH_OPTS:--o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new}"

cleanup() {
  rm -f "$TMP_ARCHIVE"
}

trap cleanup EXIT

if [ ! -d "$SUBMODULE_ROOT" ]; then
  echo "Missing submodule at $SUBMODULE_ROOT" >&2
  exit 1
fi

cd "$SUBMODULE_ROOT"
if [ "$SKIP_BUILD" != "1" ]; then
  if [ -f package-lock.json ]; then
    npm ci >/dev/null
  else
    npm install >/dev/null
  fi
  npm run sync:ecosystem >/dev/null
  npm run validate:manifest >/dev/null
  npm run build >/dev/null
fi

for required_path in \
  "$DIST_DIR/index.html" \
  "$DIST_DIR/data/ecosystem-manifest.json" \
  "$DIST_DIR/data/live-route-registry.json" \
  "$DIST_DIR/data/deployed-venue-inventory.json"; do
  if [ ! -f "$required_path" ]; then
    echo "Missing required build artifact: $required_path" >&2
    exit 1
  fi
done

jq -e '.supportedNetworks[] | select(.chainId == 138) | .deployedVenuePoolCount >= 19 and .publicRoutingPoolCount >= 19' \
  "$DIST_DIR/data/ecosystem-manifest.json" >/dev/null
jq -e '.liveSwapRoutes | length >= 19' "$DIST_DIR/data/live-route-registry.json" >/dev/null
jq -e '.liveBridgeRoutes | length >= 12' "$DIST_DIR/data/live-route-registry.json" >/dev/null
jq -e '.networks[] | select(.chainId == 138) | .venueCounts.deployedVenuePoolCount >= 19 and .summary.totalVenues >= 19' \
  "$DIST_DIR/data/deployed-venue-inventory.json" >/dev/null

rm -f "$TMP_ARCHIVE"
tar -C "$SUBMODULE_ROOT" -czf "$TMP_ARCHIVE" dist

ssh $SSH_OPTS "root@$PROXMOX_HOST" true
scp -q $SSH_OPTS "$TMP_ARCHIVE" "root@$PROXMOX_HOST:/tmp/atomic-swap-dapp-5801.tgz"

ssh $SSH_OPTS "root@$PROXMOX_HOST" "
  set -euo pipefail
  pct push $VMID /tmp/atomic-swap-dapp-5801.tgz /tmp/atomic-swap-dapp-5801.tgz
  pct exec $VMID -- bash -lc '
    set -euo pipefail
    mkdir -p \"$DEPLOY_ROOT\"
    find \"$DEPLOY_ROOT\" -mindepth 1 -maxdepth 1 -exec rm -rf {} +
    rm -rf /tmp/dist
    tar -xzf /tmp/atomic-swap-dapp-5801.tgz -C /tmp
    cp -R /tmp/dist/. \"$DEPLOY_ROOT/\"
    mkdir -p /var/cache/nginx/atomic-swap-api
    cat > /etc/nginx/conf.d/atomic-swap-api-cache.conf <<\"EOF\"
proxy_cache_path /var/cache/nginx/atomic-swap-api
  levels=1:2
  keys_zone=atomic_swap_api_cache:10m
  max_size=256m
  inactive=30m
  use_temp_path=off;
EOF
    cat > /etc/nginx/sites-available/atomic-swap <<\"EOF\"
server {
  listen 80 default_server;
  listen [::]:80 default_server;
  server_name _;

  root $DEPLOY_ROOT;
  index index.html;

  location / {
    try_files \$uri \$uri/ /index.html;
  }

  location = /index.html {
    add_header Cache-Control \"no-store, no-cache, must-revalidate\" always;
  }

  location /data/ {
    add_header Cache-Control \"no-store, no-cache, must-revalidate\" always;
  }

  location /assets/ {
    add_header Cache-Control \"public, max-age=31536000, immutable\" always;
  }

  location /api/v1/ {
    proxy_pass https://explorer.d-bis.org/api/v1/;
    proxy_ssl_server_name on;
    proxy_set_header Host explorer.d-bis.org;
    proxy_set_header X-Forwarded-Proto \$scheme;
    proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host \$host;
    proxy_http_version 1.1;
    proxy_buffering on;
    proxy_cache atomic_swap_api_cache;
    proxy_cache_methods GET HEAD;
    proxy_cache_key \"\$scheme\$proxy_host\$request_uri\";
    proxy_cache_lock on;
    proxy_cache_lock_timeout 10s;
    proxy_cache_lock_age 10s;
    proxy_cache_background_update on;
    proxy_cache_revalidate on;
    proxy_cache_valid 200 10s;
    proxy_cache_valid 404 1s;
    proxy_cache_valid any 0;
    proxy_cache_use_stale error timeout invalid_header updating http_429 http_500 http_502 http_503 http_504;
    add_header X-Atomic-Swap-Cache \$upstream_cache_status always;
  }
}
EOF
    ln -sfn /etc/nginx/sites-available/atomic-swap /etc/nginx/sites-enabled/atomic-swap
    rm -f /etc/nginx/sites-enabled/default
    rm -f /etc/nginx/sites-enabled/dapp
    nginx -t
    systemctl reload nginx
    curl -fsS http://127.0.0.1/index.html >/dev/null
    curl -fsS http://127.0.0.1/data/ecosystem-manifest.json >/dev/null
    curl -fsS http://127.0.0.1/data/live-route-registry.json >/dev/null
    curl -fsS http://127.0.0.1/data/deployed-venue-inventory.json >/dev/null
    rm -rf /tmp/dist /tmp/atomic-swap-dapp-5801.tgz
  '
  rm -f /tmp/atomic-swap-dapp-5801.tgz
"

curl -fsS https://atomic-swap.defi-oracle.io/ >/dev/null
curl -fsS https://atomic-swap.defi-oracle.io/data/ecosystem-manifest.json | jq -e '.supportedNetworks[] | select(.chainId == 138) | .deployedVenuePoolCount >= 19 and .publicRoutingPoolCount >= 19' >/dev/null
curl -fsS https://atomic-swap.defi-oracle.io/data/live-route-registry.json | jq -e '.liveSwapRoutes | length >= 19' >/dev/null
curl -fsS https://atomic-swap.defi-oracle.io/data/live-route-registry.json | jq -e '.liveBridgeRoutes | length >= 12' >/dev/null
curl -fsS https://atomic-swap.defi-oracle.io/data/deployed-venue-inventory.json | jq -e '.networks[] | select(.chainId == 138) | .venueCounts.deployedVenuePoolCount >= 19 and .summary.totalVenues >= 19' >/dev/null

echo "Deployed atomic-swap-dapp to VMID $VMID via $PROXMOX_HOST"
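The same `jq` gates that run before packing are re-checked against the public hostname after deploy. A Node equivalent of the public route-registry check, handy from CI or a monitor (a sketch; thresholds copied from the script):

```js
const res = await fetch('https://atomic-swap.defi-oracle.io/data/live-route-registry.json');
if (!res.ok) throw new Error(`registry fetch failed: HTTP ${res.status}`);
const registry = await res.json();

// Thresholds mirror the jq gates in the deploy script.
if (!Array.isArray(registry.liveSwapRoutes) || registry.liveSwapRoutes.length < 19) {
  throw new Error('fewer than 19 live swap routes');
}
if (!Array.isArray(registry.liveBridgeRoutes) || registry.liveBridgeRoutes.length < 12) {
  throw new Error('fewer than 12 live bridge routes');
}
console.log('route registry OK');
```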
@@ -1,171 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# Enable the upstream Blockscout smart-contract verifier sidecar on CT 5000 and
# wire Blockscout to use it.
#
# Usage:
#   bash scripts/deployment/ensure-blockscout-smart-contract-verifier-5000.sh --dry-run
#   bash scripts/deployment/ensure-blockscout-smart-contract-verifier-5000.sh --apply

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"

if [[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]]; then
  # shellcheck source=/dev/null
  source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
fi

HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
VMID="${BLOCKSCOUT_DB_CT_VMID:-5000}"
APPLY=0

while [[ $# -gt 0 ]]; do
  case "$1" in
    --apply) APPLY=1; shift ;;
    --dry-run) APPLY=0; shift ;;
    *)
      echo "Unknown argument: $1" >&2
      exit 1
      ;;
  esac
done

read -r -d '' REMOTE_SCRIPT <<'EOF_REMOTE' || true
set -euo pipefail
cd /opt/blockscout

stamp="$(date +%Y%m%d_%H%M%S)"
cp docker-compose.yml "docker-compose.yml.bak.${stamp}.pre_verifier"

cat > smart-contract-verifier.env <<'EOF_VERIFIER'
SMART_CONTRACT_VERIFIER__SERVER__HTTP__ENABLED=true
SMART_CONTRACT_VERIFIER__SERVER__HTTP__ADDR=0.0.0.0:8050
SMART_CONTRACT_VERIFIER__SERVER__HTTP__MAX_BODY_SIZE=8388608
SMART_CONTRACT_VERIFIER__SERVER__GRPC__ENABLED=false
SMART_CONTRACT_VERIFIER__SERVER__GRPC__ADDR=0.0.0.0:8051
SMART_CONTRACT_VERIFIER__SOLIDITY__ENABLED=true
SMART_CONTRACT_VERIFIER__SOLIDITY__COMPILERS_DIR=/tmp/solidity-compilers
SMART_CONTRACT_VERIFIER__SOLIDITY__REFRESH_VERSIONS_SCHEDULE=0 0 * * * * *
SMART_CONTRACT_VERIFIER__SOLIDITY__FETCHER__LIST__LIST_URL=https://binaries.soliditylang.org/linux-amd64/list.json
SMART_CONTRACT_VERIFIER__VYPER__ENABLED=true
SMART_CONTRACT_VERIFIER__VYPER__COMPILERS_DIR=/tmp/vyper-compilers
SMART_CONTRACT_VERIFIER__VYPER__REFRESH_VERSIONS_SCHEDULE=0 0 * * * * *
SMART_CONTRACT_VERIFIER__VYPER__FETCHER__LIST__LIST_URL=https://raw.githubusercontent.com/blockscout/solc-bin/main/vyper.list.json
SMART_CONTRACT_VERIFIER__SOURCIFY__ENABLED=true
SMART_CONTRACT_VERIFIER__SOURCIFY__API_URL=https://sourcify.dev/server/
SMART_CONTRACT_VERIFIER__SOURCIFY__VERIFICATION_ATTEMPTS=3
SMART_CONTRACT_VERIFIER__SOURCIFY__REQUEST_TIMEOUT=10
SMART_CONTRACT_VERIFIER__METRICS__ENABLED=false
SMART_CONTRACT_VERIFIER__JAEGER__ENABLED=false
EOF_VERIFIER

cat > docker-compose.yml <<'EOF_COMPOSE'
version: "3.8"

services:
  postgres:
    image: postgres:15-alpine
    container_name: blockscout-postgres
    environment:
      POSTGRES_USER: blockscout
      POSTGRES_PASSWORD: blockscout
      POSTGRES_DB: blockscout
    volumes:
      - postgres-data:/var/lib/postgresql/data
    restart: unless-stopped
    networks:
      - blockscout-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U blockscout"]
      interval: 10s
      timeout: 5s
      retries: 5

  smart-contract-verifier:
    image: ghcr.io/blockscout/smart-contract-verifier:latest
    container_name: smart-contract-verifier
    env_file:
      - ./smart-contract-verifier.env
    restart: unless-stopped
    networks:
      - blockscout-network

  blockscout:
    image: blockscout/blockscout:latest
    container_name: blockscout
    command: bin/blockscout start
    depends_on:
      postgres:
        condition: service_healthy
      smart-contract-verifier:
        condition: service_started
    environment:
      - DISABLE_WEBAPP=false
      - DISABLE_INDEXER=false
      - INDEXER_DISABLE_PENDING_TRANSACTIONS_FETCHER=true
      - DATABASE_URL=postgresql://blockscout:blockscout@postgres:5432/blockscout?sslmode=disable
      - ETHEREUM_JSONRPC_HTTP_URL=http://192.168.11.221:8545
      - ETHEREUM_JSONRPC_WS_URL=ws://192.168.11.221:8546
      - ETHEREUM_JSONRPC_TRACE_URL=http://192.168.11.221:8545
      - ETHEREUM_JSONRPC_VARIANT=besu
      - CHAIN_ID=138
      - COIN=ETH
      - BLOCKSCOUT_HOST=explorer.d-bis.org
      - BLOCKSCOUT_PROTOCOL=https
      - SECRET_KEY_BASE=73159c7d10b9a5a75ddf10710773078c078bf02124d35b72fa2a841b30b4f88c7c43e5caaf7f9f7f87d16dd66e7870931ae11039c428d1dedae187af762531fa
      - POOL_SIZE=50
      - POOL_SIZE_API=50
      - DATABASE_QUEUE_TARGET=5s
      - ECTO_USE_SSL=false
      - MICROSERVICE_SC_VERIFIER_ENABLED=true
      - MICROSERVICE_SC_VERIFIER_TYPE=sc_verifier
      - MICROSERVICE_SC_VERIFIER_URL=http://smart-contract-verifier:8050/
    ports:
      - "4000:4000"
    volumes:
      - blockscout-data:/app/apps/explorer/priv/static
    restart: unless-stopped
    networks:
      - blockscout-network

volumes:
  postgres-data:
  blockscout-data:

networks:
  blockscout-network:
    driver: bridge
EOF_COMPOSE

docker pull ghcr.io/blockscout/smart-contract-verifier:latest
docker rm -f blockscout 2>/dev/null || true
docker rm -f smart-contract-verifier 2>/dev/null || true
# Older docker-compose v1 can leave an orphaned auto-generated verifier container
# that breaks recreation with a `ContainerConfig` KeyError. Clear it first.
docker ps -a --format '{{.Names}}' | grep -E 'smart-contract-verifier$' | xargs -r docker rm -f

if command -v docker-compose >/dev/null 2>&1; then
  docker-compose -f docker-compose.yml up -d
else
  docker compose -f docker-compose.yml up -d
fi

sleep 10
docker ps --format "table {{.Names}}\t{{.Status}}"
curl -fsS http://127.0.0.1:4000/api/v2/smart-contracts/verification/config >/dev/null
EOF_REMOTE

echo "Blockscout smart-contract verifier enablement"
echo "Host: ${HOST}"
echo "VMID: ${VMID}"
echo

if (( APPLY == 0 )); then
  echo "[dry-run] Would patch /opt/blockscout/docker-compose.yml on CT ${VMID}, add smart-contract-verifier, and restart Blockscout."
  exit 0
fi

ssh root@"${HOST}" "pct exec ${VMID} -- bash -lc $(printf '%q' "${REMOTE_SCRIPT}")"
echo
echo "[ok] Blockscout verifier sidecar enabled on CT ${VMID}."
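Once the sidecar is wired in via `MICROSERVICE_SC_VERIFIER_URL`, the same config endpoint the script probes locally is also reachable through the public explorer host. A remote health probe sketch:

```js
const res = await fetch('https://explorer.d-bis.org/api/v2/smart-contracts/verification/config');
if (!res.ok) throw new Error(`verifier config unavailable: HTTP ${res.status}`);
const config = await res.json();
// With MICROSERVICE_SC_VERIFIER_ENABLED=true, the config should enumerate
// the supported verification mechanisms and compiler versions.
console.log(Object.keys(config));
```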
244 scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh Executable file
@@ -0,0 +1,244 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/scripts/lib/load-project-env.sh"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true

PHOENIX_DEPLOY_WORKSPACE="${PHOENIX_DEPLOY_WORKSPACE:-}"
PROXMOX_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-root}"
VMID="${CURRENCICOMBO_PHOENIX_VMID:-8604}"
CT_IP="${IP_CURRENCICOMBO_PHOENIX:-10.160.0.14}"
CT_REPO_DIR="${CT_REPO_DIR:-/var/lib/currencicombo/repo}"
PUBLIC_URL="${PUBLIC_URL:-https://curucombo.xn--vov0g.com}"
PUBLIC_DOMAIN="${PUBLIC_DOMAIN:-curucombo.xn--vov0g.com}"
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS:-192.168.11.167}:81}"
NPM_EMAIL="${NPM_EMAIL:-}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
DRY_RUN=0

usage() {
  cat <<'USAGE'
Usage: phoenix-deploy-currencicombo-from-workspace.sh [--dry-run]

Requires:
  PHOENIX_DEPLOY_WORKSPACE   Full staged CurrenciCombo checkout prepared by phoenix-deploy-api

This script:
  1. Packs the staged repo workspace.
  2. Pushes it into CT 8604 on r630-01.
  3. Ensures host prerequisites, then runs install.sh, the prune cron installer, and the deploy script inside the CT.
  4. Updates the public NPMplus host so /api/* preserves the full path and supports SSE.
  5. Verifies the public portal + /api/ready end to end.
USAGE
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run) DRY_RUN=1; shift ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown arg: $1" >&2; usage; exit 2 ;;
  esac
done

log() { printf '[currencicombo-phoenix] %s\n' "$*" >&2; }
die() { printf '[currencicombo-phoenix][FATAL] %s\n' "$*" >&2; exit 1; }
run() { if [[ "$DRY_RUN" -eq 1 ]]; then printf '[dry-run] %s\n' "$*" >&2; else eval "$*"; fi; }
need_cmd() { command -v "$1" >/dev/null 2>&1 || die "missing required command: $1"; }

for cmd in ssh scp tar curl jq mktemp; do
  need_cmd "$cmd"
done

[[ -n "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "PHOENIX_DEPLOY_WORKSPACE is required"
[[ -d "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "staged workspace missing: $PHOENIX_DEPLOY_WORKSPACE"

if [[ "$DRY_RUN" -eq 0 ]]; then
  [[ -n "$NPM_EMAIL" ]] || die "NPM_EMAIL is required"
  [[ -n "$NPM_PASSWORD" ]] || die "NPM_PASSWORD is required"
fi

SSH_TARGET="${PROXMOX_SSH_USER}@${PROXMOX_HOST}"
SSH_OPTS=(-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)
TMP_DIR="$(mktemp -d /tmp/currencicombo-phoenix-XXXXXX)"
ARCHIVE_PATH="${TMP_DIR}/currencicombo-workspace.tgz"
REMOTE_ARCHIVE="/tmp/$(basename "$ARCHIVE_PATH")"
CT_ARCHIVE="/root/$(basename "$ARCHIVE_PATH")"
NPM_COOKIE_JAR="${TMP_DIR}/npm-cookies.txt"
cleanup() {
  rm -rf "$TMP_DIR"
}
trap cleanup EXIT

ssh_remote() {
  local cmd="$1"
  if [[ "$DRY_RUN" -eq 1 ]]; then
    printf '[dry-run] ssh %q %q\n' "$SSH_TARGET" "$cmd" >&2
  else
    ssh "${SSH_OPTS[@]}" "$SSH_TARGET" "$cmd"
  fi
}

pct_exec_script() {
  local local_script="$1"
  local remote_script
  local ct_script
  remote_script="/tmp/$(basename "$local_script")"
  ct_script="/root/$(basename "$local_script")"
  run "scp ${SSH_OPTS[*]} '$local_script' '${SSH_TARGET}:${remote_script}'"
  ssh_remote "pct push ${VMID} '${remote_script}' '${ct_script}' --perms 0755 && rm -f '${remote_script}' && pct exec ${VMID} -- bash '${ct_script}' && pct exec ${VMID} -- rm -f '${ct_script}'"
}

log "packing staged workspace from ${PHOENIX_DEPLOY_WORKSPACE}"
run "tar -C '$PHOENIX_DEPLOY_WORKSPACE' --exclude='.git' --exclude='node_modules' --exclude='dist' --exclude='orchestrator/node_modules' --exclude='orchestrator/dist' -czf '$ARCHIVE_PATH' ."

log "ensuring CT ${VMID} is running on ${PROXMOX_HOST}"
ssh_remote "pct start ${VMID} >/dev/null 2>&1 || true"

log "uploading staged archive to CT ${VMID}"
run "scp ${SSH_OPTS[*]} '$ARCHIVE_PATH' '${SSH_TARGET}:${REMOTE_ARCHIVE}'"
ssh_remote "pct push ${VMID} '${REMOTE_ARCHIVE}' '${CT_ARCHIVE}' && rm -f '${REMOTE_ARCHIVE}'"

CT_SCRIPT="${TMP_DIR}/currencicombo-ct-deploy.sh"
cat > "$CT_SCRIPT" <<'EOF'
#!/usr/bin/env bash
set -euo pipefail

export DEBIAN_FRONTEND=noninteractive
ARCHIVE_PATH="__CT_ARCHIVE__"
REPO_DIR="__CT_REPO_DIR__"

need_pkg() {
  dpkg -s "$1" >/dev/null 2>&1
}

apt-get update -qq
for pkg in ca-certificates curl git jq postgresql redis-server rsync build-essential; do
  need_pkg "$pkg" || apt-get install -y -qq "$pkg"
done

if ! command -v node >/dev/null 2>&1 || ! node -v 2>/dev/null | grep -q '^v20\.'; then
  curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
  apt-get install -y -qq nodejs
fi

systemctl enable --now postgresql >/dev/null 2>&1 || true
systemctl enable --now redis-server >/dev/null 2>&1 || true

if [[ ! -f /root/currencicombo-prephoenix-archive.tgz && -d /opt/currencicombo ]]; then
  tar -czf /root/currencicombo-prephoenix-archive.tgz /opt/currencicombo /etc/currencicombo 2>/dev/null || true
fi

install -d -o root -g root -m 0755 "$(dirname "$REPO_DIR")"
rm -rf "$REPO_DIR"
mkdir -p "$REPO_DIR"
tar -xzf "$ARCHIVE_PATH" -C "$REPO_DIR"
rm -f "$ARCHIVE_PATH"

bash "$REPO_DIR/scripts/deployment/install.sh"
bash "$REPO_DIR/scripts/deployment/install-prune-cron.sh"
CC_GIT_REF=local bash "$REPO_DIR/scripts/deployment/deploy-currencicombo-8604.sh"
systemctl is-active currencicombo-orchestrator.service currencicombo-webapp.service
curl -fsS http://127.0.0.1:8080/ready
curl -fsS http://127.0.0.1:3000/ >/dev/null
EOF
perl -0pi -e "s|__CT_ARCHIVE__|${CT_ARCHIVE//|/\\|}|g; s|__CT_REPO_DIR__|${CT_REPO_DIR//|/\\|}|g" "$CT_SCRIPT"

log "running install + deploy inside CT ${VMID}"
pct_exec_script "$CT_SCRIPT"

if [[ "$DRY_RUN" -eq 0 ]]; then
  log "updating NPMplus proxy host for ${PUBLIC_DOMAIN}"
  AUTH_JSON="$(jq -nc --arg identity "$NPM_EMAIL" --arg secret "$NPM_PASSWORD" '{identity:$identity,secret:$secret}')"
  TOKEN_RESPONSE="$(curl -sk -X POST "$NPM_URL/api/tokens" -H 'Content-Type: application/json' -d "$AUTH_JSON" -c "$NPM_COOKIE_JAR")"
  TOKEN="$(echo "$TOKEN_RESPONSE" | jq -r '.token // .accessToken // .access_token // .data.token // empty' 2>/dev/null)"
  USE_COOKIE_AUTH=0
  if [[ -z "$TOKEN" || "$TOKEN" == "null" ]]; then
    if echo "$TOKEN_RESPONSE" | jq -e '.expires' >/dev/null 2>&1; then
      USE_COOKIE_AUTH=1
    else
      die "NPMplus authentication failed"
    fi
  fi

  npm_api() {
    if [[ "$USE_COOKIE_AUTH" -eq 1 ]]; then
      curl -sk -b "$NPM_COOKIE_JAR" "$@"
    else
      curl -sk -H "Authorization: Bearer $TOKEN" "$@"
    fi
  }

  HOSTS_JSON="$(npm_api -X GET "$NPM_URL/api/nginx/proxy-hosts")"
  HOST_ID="$(echo "$HOSTS_JSON" | jq -r --arg domain "$PUBLIC_DOMAIN" '
    (if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
    | map(select(.domain_names | type == "array"))
    | map(select(any(.domain_names[]; . == $domain)))
    | .[0].id // empty
  ')"
  [[ -n "$HOST_ID" ]] || die "NPMplus proxy host not found for ${PUBLIC_DOMAIN}"

  ADVANCED_CONFIG="$(cat <<CFG
location ^~ /api/ {
  proxy_pass http://${CT_IP}:8080;
  proxy_http_version 1.1;
  proxy_set_header Host \$host;
  proxy_set_header X-Real-IP \$remote_addr;
  proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
  proxy_set_header X-Forwarded-Proto \$scheme;
  proxy_set_header Connection \"\";
  proxy_buffering off;
  proxy_cache off;
  proxy_read_timeout 24h;
  proxy_send_timeout 24h;
  add_header Cache-Control \"no-cache\";
}
CFG
)"

  PAYLOAD="$(echo "$HOSTS_JSON" | jq -c --arg domain "$PUBLIC_DOMAIN" --arg host "$CT_IP" --arg advanced "$ADVANCED_CONFIG" '
    (if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
    | map(select(.domain_names | type == "array"))
    | map(select(any(.domain_names[]; . == $domain)))
    | .[0]
    | {
        domain_names,
        forward_scheme: (.forward_scheme // "http"),
        forward_host: $host,
        forward_port: 3000,
        access_list_id,
        certificate_id,
        ssl_forced,
        caching_enabled,
        block_exploits,
        advanced_config: $advanced,
        allow_websocket_upgrade,
        http2_support,
        hsts_enabled,
        hsts_subdomains,
        enabled
      }
  ')"
  [[ -n "$PAYLOAD" && "$PAYLOAD" != "null" ]] || die "failed to build NPMplus update payload"
  UPDATE_RESPONSE="$(npm_api -X PUT "$NPM_URL/api/nginx/proxy-hosts/${HOST_ID}" -H 'Content-Type: application/json' -d "$PAYLOAD")"
  echo "$UPDATE_RESPONSE" | jq -e '.id != null' >/dev/null 2>&1 || die "NPMplus proxy host update failed"

  log "running public smoke checks"
  HEADERS="$(curl -skI "$PUBLIC_URL/")"
  echo "$HEADERS" | grep -q '^HTTP/2 200' || die "public root is not HTTP 200"
  if echo "$HEADERS" | grep -qi '^x-nextjs-prerender:'; then
    die "old Next.js headers still present on public root"
  fi

  curl -sk "$PUBLIC_URL/" | grep -F '<title>Solace Bank Group PLC — Treasury Management Portal</title>' >/dev/null || die "public title mismatch"
  READY_BODY="$(curl -sk "$PUBLIC_URL/api/ready")"
  echo "$READY_BODY" | grep -F '"ready":true' >/dev/null || die "public /api/ready failed"
  curl -skN --max-time 5 -H 'Accept: text/event-stream' "$PUBLIC_URL/api/plans/demo-pay-014/status/stream" | grep -F '"type":"connected"' >/dev/null || die "public SSE smoke failed"

  log "capturing EXT-* blocker summary"
  ssh_remote "pct exec ${VMID} -- journalctl -u currencicombo-orchestrator.service -n 200 --no-pager | grep -E 'ExternalBlockers|EXT-' || true"
fi

log "CurrenciCombo Phoenix deploy completed from ${PHOENIX_DEPLOY_WORKSPACE}"
56
scripts/verify/check-gitea-branch-workflow-parity.sh
Normal file
@@ -0,0 +1,56 @@
#!/usr/bin/env bash
set -euo pipefail
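# Guard against drift between the two deploy-triggering branches: the canonical
# workflow files must be byte-identical on main and master.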

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"

SOURCE_TARGET_PAIRS=(
  ".gitea/workflow-sources/deploy-to-phoenix.yml:.gitea/workflows/deploy-to-phoenix.yml"
  ".gitea/workflow-sources/validate-on-pr.yml:.gitea/workflows/validate-on-pr.yml"
)

REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
if git remote | grep -qx gitea; then
  REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
fi

missing_ref=false
for ref in "$REMOTE/main" "$REMOTE/master"; do
  if ! git rev-parse --verify "$ref" >/dev/null 2>&1; then
    missing_ref=true
  fi
done

if [[ "$missing_ref" == true ]]; then
  echo "[i] Skipping main/master workflow parity check ($REMOTE/main or $REMOTE/master not available)"
  exit 0
fi

for pair in "${SOURCE_TARGET_PAIRS[@]}"; do
  source="${pair%%:*}"
  target="${pair##*:}"

  main_blob="$(git show "$REMOTE/main:$source" 2>/dev/null || true)"
  master_blob="$(git show "$REMOTE/master:$source" 2>/dev/null || true)"

  if [[ -z "$main_blob" ]]; then
    main_blob="$(git show "$REMOTE/main:$target" 2>/dev/null || true)"
  fi
  if [[ -z "$master_blob" ]]; then
    master_blob="$(git show "$REMOTE/master:$target" 2>/dev/null || true)"
  fi

  if [[ -z "$main_blob" || -z "$master_blob" ]]; then
    echo "[✗] Missing $source/$target on $REMOTE/main or $REMOTE/master" >&2
    exit 1
  fi

  if [[ "$main_blob" != "$master_blob" ]]; then
    echo "[✗] Branch workflow drift: $source differs between $REMOTE/main and $REMOTE/master" >&2
    echo "    Keep both deploy branches in lockstep for workflow-source files." >&2
    exit 1
  fi

  echo "[✓] Branch parity OK for $source"
done
32
scripts/verify/check-gitea-workflows.sh
Normal file
@@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -euo pipefail
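# Byte-compare each workflow-source file with its checked-in .gitea/workflows copy.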

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"

check_one() {
  local source_rel="$1"
  local target_rel="$2"

  if [[ ! -f "$source_rel" ]]; then
    echo "[✗] Missing workflow source: $source_rel" >&2
    return 1
  fi

  if [[ ! -f "$target_rel" ]]; then
    echo "[✗] Missing generated workflow: $target_rel" >&2
    return 1
  fi

  if ! diff -u "$source_rel" "$target_rel" >/dev/null; then
    echo "[✗] Workflow drift detected: $target_rel does not match $source_rel" >&2
    echo "    Run: bash scripts/verify/sync-gitea-workflows.sh" >&2
    return 1
  fi

  echo "[✓] $target_rel matches $source_rel"
}

check_one ".gitea/workflow-sources/deploy-to-phoenix.yml" ".gitea/workflows/deploy-to-phoenix.yml"
check_one ".gitea/workflow-sources/validate-on-pr.yml" ".gitea/workflows/validate-on-pr.yml"
50
scripts/verify/check-pnpm-workspace-lockfile.sh
Executable file
@@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Every path listed under "packages:" in pnpm-workspace.yaml must have a matching
# importer entry in pnpm-lock.yaml. If one is missing, pnpm can fail in confusing
# ways (e.g. pnpm outdated -r: Cannot read ... 'optionalDependencies').
# Usage: bash scripts/verify/check-pnpm-workspace-lockfile.sh
# Exit: 0 if check passes or pnpm is not used; 1 on mismatch.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
WS="${PROJECT_ROOT}/pnpm-workspace.yaml"
LOCK="${PROJECT_ROOT}/pnpm-lock.yaml"

if [[ ! -f "$WS" ]] || [[ ! -f "$LOCK" ]]; then
  echo "  (skip: pnpm-workspace.yaml or pnpm-lock.yaml not present at repo root)"
  exit 0
fi

# Paths under the top-level `packages:` block only (stops at next top-level key)
mapfile -t _paths < <(awk '
  /^packages:/ { p=1; next }
  p && /^[a-zA-Z]/ && $0 !~ /^packages/ { exit }
  p && /^[[:space:]]*-[[:space:]]/ {
    sub(/^[[:space:]]*-[[:space:]]+/, "")
    sub(/[[:space:]]*#.*/, "")
    gsub(/[[:space:]]+$/, "")
    if (length) print
  }
' "$WS")

missing=()
for relp in "${_paths[@]}"; do
  if [[ -z "$relp" ]]; then
    continue
  fi
  if ! grep -qFx "  ${relp}:" "$LOCK"; then
    missing+=("$relp")
  fi
done

if [[ ${#missing[@]} -gt 0 ]]; then
  echo "✗ pnpm lockfile is missing importer(s) for these workspace path(s):"
  printf '  %q\n' "${missing[@]}"
  echo "  Run: pnpm install (at repo root) to refresh pnpm-lock.yaml"
  exit 1
fi

echo "  pnpm workspace / lockfile importers aligned (${#_paths[@]} path(s))."
exit 0
scripts/verify/run-all-validation.sh
@@ -3,6 +3,7 @@
# Use for CI or pre-deploy: dependencies, config files, optional genesis.
# Usage: bash scripts/verify/run-all-validation.sh [--skip-genesis]
#   --skip-genesis: do not run validate-genesis.sh (default: run if smom-dbis-138 present).
# Steps: dependencies, config files, cW* mesh matrix (if pair-discovery JSON exists), genesis.

set -euo pipefail

@@ -24,15 +25,64 @@ bash "$SCRIPT_DIR/check-dependencies.sh" || log_err "check-dependencies failed"
log_ok "Dependencies OK"
echo ""

echo "1b. pnpm workspace vs lockfile..."
if [[ -f "$PROJECT_ROOT/pnpm-workspace.yaml" ]]; then
  bash "$SCRIPT_DIR/check-pnpm-workspace-lockfile.sh" || log_err "pnpm lockfile / workspace drift"
  log_ok "pnpm lockfile aligned with workspace"
else
  echo "  (no pnpm-workspace.yaml at root — skip)"
fi
echo ""

echo "1c. Gitea workflow source sync..."
bash "$SCRIPT_DIR/check-gitea-workflows.sh" || log_err "Gitea workflow source drift"
log_ok "Gitea workflows match source-of-truth files"
echo ""

echo "1d. main/master workflow parity..."
bash "$SCRIPT_DIR/check-gitea-branch-workflow-parity.sh" || log_err "main/master workflow parity drift"
log_ok "main/master workflow parity OK"
echo ""

echo "2. Config files..."
bash "$SCRIPT_DIR/../validation/validate-config-files.sh" || log_err "validate-config-files failed"
log_ok "Config validation OK"
echo ""

if [[ "$SKIP_GENESIS" == true ]]; then
echo "3. Genesis — skipped (--skip-genesis)"
echo "3. cW* mesh matrix (deployment-status + Uni V2 pair-discovery)..."
DISCOVERY_JSON="$PROJECT_ROOT/reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json"
if [[ -f "$DISCOVERY_JSON" ]]; then
  MATRIX_JSON="$PROJECT_ROOT/reports/status/cw-mesh-deployment-matrix-latest.json"
  bash "$SCRIPT_DIR/build-cw-mesh-deployment-matrix.sh" --no-markdown --json-out "$MATRIX_JSON" || log_err "cw mesh matrix merge failed"
  log_ok "cW mesh matrix OK (also wrote $MATRIX_JSON)"
else
echo "3. Genesis (smom-dbis-138)..."
  echo "  ($DISCOVERY_JSON missing — run: bash scripts/verify/build-promod-uniswap-v2-live-pair-discovery.sh)"
fi
echo ""

echo "3b. deployment-status graph (cross-chain-pmm-lps)..."
PMM_VALIDATE="$PROJECT_ROOT/cross-chain-pmm-lps/scripts/validate-deployment-status.cjs"
if [[ -f "$PMM_VALIDATE" ]] && command -v node &>/dev/null; then
  node "$PMM_VALIDATE" || log_err "validate-deployment-status.cjs failed"
  log_ok "deployment-status.json rules OK"
else
  echo "  (skip: node or $PMM_VALIDATE missing)"
fi
echo ""

echo "3c. External dependency blockers..."
EXT_CHECK="$SCRIPT_DIR/check-external-dependencies.sh"
if [[ -x "$EXT_CHECK" ]]; then
  bash "$EXT_CHECK" --advisory || true
else
  echo "  (skip: $EXT_CHECK missing)"
fi
echo ""

if [[ "$SKIP_GENESIS" == true ]]; then
  echo "4. Genesis — skipped (--skip-genesis)"
else
  echo "4. Genesis (smom-dbis-138)..."
  GENESIS_SCRIPT="$PROJECT_ROOT/smom-dbis-138/scripts/validation/validate-genesis.sh"
  if [[ -x "$GENESIS_SCRIPT" ]]; then
    bash "$GENESIS_SCRIPT" || log_err "validate-genesis failed"
18
scripts/verify/sync-gitea-workflows.sh
Normal file
@@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"

sync_one() {
  local source_rel="$1"
  local target_rel="$2"

  mkdir -p "$(dirname "$target_rel")"
  cp "$source_rel" "$target_rel"
  echo "[✓] Synced $target_rel from $source_rel"
}

sync_one ".gitea/workflow-sources/deploy-to-phoenix.yml" ".gitea/workflows/deploy-to-phoenix.yml"
sync_one ".gitea/workflow-sources/validate-on-pr.yml" ".gitea/workflows/validate-on-pr.yml"
scripts/verify/verify-chain138-native-v2-blockscout.sh
@@ -1,224 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# Verify the Chain 138 native Uniswap v2 and SushiSwap deployments on Blockscout.
#
# Usage:
#   bash scripts/verify/verify-chain138-native-v2-blockscout.sh
#   bash scripts/verify/verify-chain138-native-v2-blockscout.sh --status-only
#   bash scripts/verify/verify-chain138-native-v2-blockscout.sh --only UniswapV2Factory,SushiSwapRouter

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM_ROOT="${PROJECT_ROOT}/smom-dbis-138"

if [[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]]; then
  # shellcheck source=/dev/null
  source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
fi

command -v forge >/dev/null 2>&1 || { echo "ERROR: forge not found"; exit 1; }
command -v node >/dev/null 2>&1 || { echo "ERROR: node not found"; exit 1; }
command -v cast >/dev/null 2>&1 || { echo "ERROR: cast not found"; exit 1; }
command -v jq >/dev/null 2>&1 || { echo "ERROR: jq not found"; exit 1; }
command -v curl >/dev/null 2>&1 || { echo "ERROR: curl not found"; exit 1; }

RPC_URL="${RPC_URL_138:-${CHAIN138_RPC_URL:-http://192.168.11.211:8545}}"
BLOCKSCOUT_URL="${CHAIN138_BLOCKSCOUT_INTERNAL_URL:-http://${IP_BLOCKSCOUT:-192.168.11.140}:4000}"
BLOCKSCOUT_API_BASE="${CHAIN138_BLOCKSCOUT_API_BASE:-${BLOCKSCOUT_URL}/api/v2}"
BLOCKSCOUT_PUBLIC_API_BASE="${CHAIN138_BLOCKSCOUT_PUBLIC_API_BASE:-https://explorer.d-bis.org/api/v2}"
VERIFIER_PORT="${FORGE_VERIFIER_PROXY_PORT:-3080}"
FORGE_VERIFIER_URL="${FORGE_VERIFIER_URL:-http://127.0.0.1:${VERIFIER_PORT}/api}"

UNISWAP_JSON="${SMOM_ROOT}/deployments/chain138/uniswap-v2-native.json"
SUSHI_JSON="${SMOM_ROOT}/deployments/chain138/sushiswap-native.json"

ONLY_LIST=""
STATUS_ONLY=0
PROXY_PID=""

while [[ $# -gt 0 ]]; do
  case "$1" in
    --only) ONLY_LIST="${2:-}"; shift 2 ;;
    --status-only) STATUS_ONLY=1; shift ;;
    *)
      echo "Unknown argument: $1" >&2
      exit 1
      ;;
  esac
done

cleanup_proxy() {
  [[ -n "${PROXY_PID:-}" ]] && kill "${PROXY_PID}" 2>/dev/null || true
}
trap cleanup_proxy EXIT

should_handle() {
  local name="$1"
  [[ -n "${ONLY_LIST}" ]] && [[ ",${ONLY_LIST}," != *",${name},"* ]] && return 1
  return 0
}

log() { printf '%s\n' "$*"; }
ok() { printf '[ok] %s\n' "$*"; }
warn() { printf '[warn] %s\n' "$*" >&2; }
fail() { printf '[fail] %s\n' "$*" >&2; exit 1; }

proxy_listening() {
  if command -v nc >/dev/null 2>&1; then
    nc -z -w 2 127.0.0.1 "${VERIFIER_PORT}" 2>/dev/null
  else
    timeout 2 bash -c "echo >/dev/tcp/127.0.0.1/${VERIFIER_PORT}" 2>/dev/null
  fi
}

start_proxy_if_needed() {
  if proxy_listening; then
    ok "Forge verification proxy already listening on ${VERIFIER_PORT}."
    return 0
  fi

  log "Starting forge verification proxy on ${VERIFIER_PORT} -> ${BLOCKSCOUT_URL}"
  PORT="${VERIFIER_PORT}" BLOCKSCOUT_URL="${BLOCKSCOUT_URL}" node "${PROJECT_ROOT}/forge-verification-proxy/server.js" >/tmp/chain138-native-v2-blockscout-proxy.log 2>&1 &
  PROXY_PID=$!
  sleep 2
  proxy_listening || fail "Forge verification proxy failed to start. See /tmp/chain138-native-v2-blockscout-proxy.log"
}

has_contract_bytecode() {
  local addr="$1"
  local code
  code="$(cast code "${addr}" --rpc-url "${RPC_URL}" 2>/dev/null | tr -d '\n\r \t' | tr '[:upper:]' '[:lower:]')" || true
  [[ -n "${code}" && "${code}" != "0x" && "${code}" != "0x0" ]]
}

verification_status_json() {
  local addr="$1"
  local raw
  local base
  for base in "${BLOCKSCOUT_API_BASE}" "${BLOCKSCOUT_PUBLIC_API_BASE}"; do
    raw="$(curl --max-time 20 -fsS "${base}/smart-contracts/${addr}" 2>/dev/null || true)"
    if [[ -n "${raw}" ]] && jq -e 'type == "object"' >/dev/null 2>&1 <<<"${raw}"; then
      printf '%s' "${raw}"
      return 0
    fi
  done
  return 1
}
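# "Verified" here means Blockscout already reports a contract name matching the
# one we deployed, plus a compiler version.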
is_verified() {
  local addr="$1"
  local expected_name="$2"
  local json name compiler
  json="$(verification_status_json "${addr}")" || return 1
  name="$(jq -r '.name // empty' <<<"${json}")"
  compiler="$(jq -r '.compiler_version // empty' <<<"${json}")"
  [[ -n "${name}" && -n "${compiler}" && "${name}" == "${expected_name}" ]]
}

submit_verification() {
  local label="$1"
  local addr="$2"
  local path="$3"
  local expected_name="$4"
  local constructor_sig="$5"
  local compiler_version="$6"
  local force_flag="${7:-0}"
  shift 6
  local constructor_args=("$@")

  start_proxy_if_needed
  has_contract_bytecode "${addr}" || fail "${label} has no bytecode at ${addr}"

  if is_verified "${addr}" "${expected_name}"; then
    ok "${label} already verified on Blockscout."
    return 0
  fi

  local cmd=(forge verify-contract "${addr}" "${path}" --chain-id 138 --verifier blockscout --verifier-url "${FORGE_VERIFIER_URL}" --rpc-url "${RPC_URL}" --flatten)
  [[ -n "${compiler_version}" ]] && cmd+=(--compiler-version "${compiler_version}")
  if [[ "${force_flag}" == "1" ]]; then
    cmd+=(--force)
  fi
  if [[ -n "${constructor_sig}" ]]; then
    local encoded
    encoded="$(cast abi-encode "${constructor_sig}" "${constructor_args[@]:1}")"
    cmd+=(--constructor-args "${encoded}")
  fi

  log "Submitting Blockscout verification for ${label} (${addr})"
  if (cd "${SMOM_ROOT}" && "${cmd[@]}" 2>&1); then
    ok "${label} verification submission accepted."
  else
    warn "${label} verification submission did not complete cleanly. Check Blockscout manually."
  fi
}

[[ -f "${UNISWAP_JSON}" ]] || fail "Missing deployment artifact ${UNISWAP_JSON}"
[[ -f "${SUSHI_JSON}" ]] || fail "Missing deployment artifact ${SUSHI_JSON}"

UNISWAP_FACTORY="$(jq -r '.factory' "${UNISWAP_JSON}")"
UNISWAP_ROUTER="$(jq -r '.router' "${UNISWAP_JSON}")"
UNISWAP_WETH="$(jq -r '.weth' "${UNISWAP_JSON}")"
UNISWAP_DEPLOYER="$(jq -r '.deployer' "${UNISWAP_JSON}")"

SUSHI_FACTORY="$(jq -r '.factory' "${SUSHI_JSON}")"
SUSHI_ROUTER="$(jq -r '.router' "${SUSHI_JSON}")"
SUSHI_WETH="$(jq -r '.weth' "${SUSHI_JSON}")"
SUSHI_FEE_TO_SETTER="$(jq -r '.feeToSetter // .deployer' "${SUSHI_JSON}")"

log "Chain 138 native V2 Blockscout verification"
log "RPC: ${RPC_URL}"
log "Explorer API: ${BLOCKSCOUT_API_BASE}"
log

if (( STATUS_ONLY )); then
  should_handle "UniswapV2Factory" && { is_verified "${UNISWAP_FACTORY}" "UniswapV2Factory" && ok "UniswapV2Factory already verified on Blockscout." || warn "UniswapV2Factory not yet verified on Blockscout."; }
  should_handle "UniswapV2Router" && { is_verified "${UNISWAP_ROUTER}" "UniswapV2Router02" && ok "UniswapV2Router already verified on Blockscout." || warn "UniswapV2Router not yet verified on Blockscout."; }
  should_handle "SushiSwapFactory" && { is_verified "${SUSHI_FACTORY}" "UniswapV2Factory" && ok "SushiSwapFactory already verified on Blockscout." || warn "SushiSwapFactory not yet verified on Blockscout."; }
  should_handle "SushiSwapRouter" && { is_verified "${SUSHI_ROUTER}" "UniswapV2Router02" && ok "SushiSwapRouter already verified on Blockscout." || warn "SushiSwapRouter not yet verified on Blockscout."; }
  exit 0
fi

should_handle "UniswapV2Factory" && submit_verification \
  "UniswapV2Factory" \
  "${UNISWAP_FACTORY}" \
  "contracts/vendor/uniswap-v2-core/UniswapV2Factory.sol:UniswapV2Factory" \
  "UniswapV2Factory" \
  "constructor(address)" \
  "v0.5.16+commit.9c3226ce" \
  "1" \
  "${UNISWAP_DEPLOYER}"

should_handle "UniswapV2Router" && submit_verification \
  "UniswapV2Router" \
  "${UNISWAP_ROUTER}" \
  "contracts/vendor/uniswap-v2-periphery/UniswapV2Router02.sol:UniswapV2Router02" \
  "UniswapV2Router02" \
  "constructor(address,address)" \
  "v0.6.6+commit.6c089d02" \
  "1" \
  "${UNISWAP_FACTORY}" "${UNISWAP_WETH}"

should_handle "SushiSwapFactory" && submit_verification \
  "SushiSwapFactory" \
  "${SUSHI_FACTORY}" \
  "contracts/vendor/sushiswap-v2/UniswapV2Factory.sol:UniswapV2Factory" \
  "UniswapV2Factory" \
  "constructor(address)" \
  "v0.6.12+commit.27d51765" \
  "1" \
  "${SUSHI_FEE_TO_SETTER}"

should_handle "SushiSwapRouter" && submit_verification \
  "SushiSwapRouter" \
  "${SUSHI_ROUTER}" \
  "contracts/vendor/sushiswap-v2/UniswapV2Router02.sol:UniswapV2Router02" \
  "UniswapV2Router02" \
  "constructor(address,address)" \
  "v0.6.12+commit.27d51765" \
  "1" \
  "${SUSHI_FACTORY}" "${SUSHI_WETH}"

log
ok "Chain 138 native V2 verification flow complete."
scripts/verify/verify-chain138-route-execution-stack-blockscout.sh
@@ -1,545 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# Verify the deployed Chain 138 route execution stack and pilot venue contracts on Blockscout.
#
# Usage:
#   bash scripts/verify/verify-chain138-route-execution-stack-blockscout.sh
#   bash scripts/verify/verify-chain138-route-execution-stack-blockscout.sh --status-only
#   bash scripts/verify/verify-chain138-route-execution-stack-blockscout.sh --no-wait
#   bash scripts/verify/verify-chain138-route-execution-stack-blockscout.sh --only EnhancedSwapRouterV2,Chain138PilotCurve3Pool

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM_ROOT="${PROJECT_ROOT}/smom-dbis-138"

if [[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]]; then
  # shellcheck source=/dev/null
  source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
fi

command -v forge >/dev/null 2>&1 || { echo "ERROR: forge not found"; exit 1; }
command -v node >/dev/null 2>&1 || { echo "ERROR: node not found"; exit 1; }
command -v cast >/dev/null 2>&1 || { echo "ERROR: cast not found"; exit 1; }
command -v jq >/dev/null 2>&1 || { echo "ERROR: jq not found"; exit 1; }
command -v curl >/dev/null 2>&1 || { echo "ERROR: curl not found"; exit 1; }

RPC_URL="${RPC_URL_138:-${CHAIN138_RPC_URL:-http://192.168.11.211:8545}}"
BLOCKSCOUT_URL="${CHAIN138_BLOCKSCOUT_INTERNAL_URL:-http://${IP_BLOCKSCOUT:-192.168.11.140}:4000}"
BLOCKSCOUT_API_BASE="${CHAIN138_BLOCKSCOUT_API_BASE:-${BLOCKSCOUT_URL}/api/v2}"
BLOCKSCOUT_PUBLIC_API_BASE="${CHAIN138_BLOCKSCOUT_PUBLIC_API_BASE:-https://explorer.d-bis.org/api/v2}"
VERIFIER_PORT="${FORGE_VERIFIER_PROXY_PORT:-3080}"
FORGE_VERIFIER_URL="${FORGE_VERIFIER_URL:-http://127.0.0.1:${VERIFIER_PORT}/api}"
ROUTE_STACK_SOLC_VERSION="${ROUTE_STACK_SOLC_VERSION:-v0.8.20+commit.a1b79de6}"
ROUTE_STACK_EVM_VERSION="${ROUTE_STACK_EVM_VERSION:-shanghai}"
ROUTE_STACK_OPT_RUNS="${ROUTE_STACK_OPT_RUNS:-200}"

ONLY_LIST=""
STATUS_ONLY=0
NO_WAIT=0
PROXY_PID=""

while [[ $# -gt 0 ]]; do
  case "$1" in
    --only) ONLY_LIST="${2:-}"; shift 2 ;;
    --status-only) STATUS_ONLY=1; shift ;;
    --no-wait) NO_WAIT=1; shift ;;
    *)
      echo "Unknown argument: $1" >&2
      exit 1
      ;;
  esac
done

cleanup_proxy() {
  [[ -n "${PROXY_PID:-}" ]] && kill "${PROXY_PID}" 2>/dev/null || true
}
trap cleanup_proxy EXIT

should_handle() {
  local name="$1"
  [[ -n "${ONLY_LIST}" ]] && [[ ",${ONLY_LIST}," != *",${name},"* ]] && return 1
  return 0
}

log() { printf '%s\n' "$*"; }
ok() { printf '[ok] %s\n' "$*"; }
warn() { printf '[warn] %s\n' "$*" >&2; }
fail() { printf '[fail] %s\n' "$*" >&2; exit 1; }

proxy_listening() {
  if command -v nc >/dev/null 2>&1; then
    nc -z -w 2 127.0.0.1 "${VERIFIER_PORT}" 2>/dev/null
  else
    timeout 2 bash -c "echo >/dev/tcp/127.0.0.1/${VERIFIER_PORT}" 2>/dev/null
  fi
}

start_proxy_if_needed() {
  if proxy_listening; then
    ok "Forge verification proxy already listening on ${VERIFIER_PORT}."
    return 0
  fi

  log "Starting forge verification proxy on ${VERIFIER_PORT} -> ${BLOCKSCOUT_URL}"
  PORT="${VERIFIER_PORT}" BLOCKSCOUT_URL="${BLOCKSCOUT_URL}" node "${PROJECT_ROOT}/forge-verification-proxy/server.js" >/tmp/chain138-route-execution-blockscout-proxy.log 2>&1 &
  PROXY_PID=$!
  sleep 2
  proxy_listening || fail "Forge verification proxy failed to start. See /tmp/chain138-route-execution-blockscout-proxy.log"
}

has_contract_bytecode() {
  local addr="$1"
  local code
  code="$(cast code "${addr}" --rpc-url "${RPC_URL}" 2>/dev/null | tr -d '\n\r \t' | tr '[:upper:]' '[:lower:]')" || true
  [[ -n "${code}" && "${code}" != "0x" && "${code}" != "0x0" ]]
}

verification_status_json() {
  local addr="$1"
  local raw
  local base
  for base in "${BLOCKSCOUT_API_BASE}" "${BLOCKSCOUT_PUBLIC_API_BASE}"; do
    raw="$(curl --max-time 20 -fsS "${base}/smart-contracts/${addr}" 2>/dev/null || true)"
    if [[ -n "${raw}" ]] && jq -e 'type == "object"' >/dev/null 2>&1 <<<"${raw}"; then
      printf '%s' "${raw}"
      return 0
    fi
  done
  return 1
}

is_verified() {
  local addr="$1"
  local expected_name="$2"
  local json name compiler
  json="$(verification_status_json "${addr}")" || return 1
  name="$(jq -r '.name // empty' <<<"${json}")"
  compiler="$(jq -r '.compiler_version // empty' <<<"${json}")"
  [[ -n "${name}" && -n "${compiler}" && "${name}" == "${expected_name}" ]]
}

submit_verification() {
  local label="$1"
  local addr="$2"
  local path="$3"
  local expected_name="$4"
  local constructor_sig="$5"
  shift 5
  local constructor_args=("$@")

  start_proxy_if_needed
  has_contract_bytecode "${addr}" || fail "${label} has no bytecode at ${addr}"

  if is_verified "${addr}" "${expected_name}"; then
    ok "${label} already verified on Blockscout."
    return 0
  fi

  local cmd=(forge verify-contract "${addr}" "${path}" --chain-id 138 --verifier blockscout --verifier-url "${FORGE_VERIFIER_URL}" --rpc-url "${RPC_URL}" --flatten)
  if [[ -n "${constructor_sig}" ]]; then
    local encoded
    encoded="$(cast abi-encode "${constructor_sig}" "${constructor_args[@]}")"
    cmd+=(--constructor-args "${encoded}")
  fi

  log "Submitting Blockscout verification for ${label} (${addr})"
  if (cd "${SMOM_ROOT}" && "${cmd[@]}" 2>&1); then
    ok "${label} verification submission accepted."
  else
    warn "${label} verification submission did not complete cleanly. Check Blockscout manually."
  fi
}

artifact_dbg_path() {
  case "$1" in
    EnhancedSwapRouterV2) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/EnhancedSwapRouterV2.sol/EnhancedSwapRouterV2.dbg.json" ;;
    IntentBridgeCoordinatorV2) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/IntentBridgeCoordinatorV2.sol/IntentBridgeCoordinatorV2.dbg.json" ;;
    DodoRouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/adapters/DodoRouteExecutorAdapter.sol/DodoRouteExecutorAdapter.dbg.json" ;;
    DodoV3RouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/adapters/DodoV3RouteExecutorAdapter.sol/DodoV3RouteExecutorAdapter.dbg.json" ;;
    UniswapV3RouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/adapters/UniswapV3RouteExecutorAdapter.sol/UniswapV3RouteExecutorAdapter.dbg.json" ;;
    BalancerRouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/adapters/BalancerRouteExecutorAdapter.sol/BalancerRouteExecutorAdapter.dbg.json" ;;
    CurveRouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/adapters/CurveRouteExecutorAdapter.sol/CurveRouteExecutorAdapter.dbg.json" ;;
    OneInchRouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/adapters/OneInchRouteExecutorAdapter.sol/OneInchRouteExecutorAdapter.dbg.json" ;;
    Chain138PilotUniswapV3Router) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol/Chain138PilotUniswapV3Router.dbg.json" ;;
    Chain138PilotBalancerVault) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol/Chain138PilotBalancerVault.dbg.json" ;;
    Chain138PilotCurve3Pool) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol/Chain138PilotCurve3Pool.dbg.json" ;;
    Chain138PilotOneInchAggregationRouter) printf '%s' "${SMOM_ROOT}/artifacts/contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol/Chain138PilotOneInchAggregationRouter.dbg.json" ;;
    *) return 1 ;;
  esac
}

foundry_artifact_json_path() {
  case "$1" in
    EnhancedSwapRouterV2) printf '%s' "${SMOM_ROOT}/out/EnhancedSwapRouterV2.sol/EnhancedSwapRouterV2.json" ;;
    IntentBridgeCoordinatorV2) printf '%s' "${SMOM_ROOT}/out/IntentBridgeCoordinatorV2.sol/IntentBridgeCoordinatorV2.json" ;;
    DodoRouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/out/DodoRouteExecutorAdapter.sol/DodoRouteExecutorAdapter.json" ;;
    DodoV3RouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/out/DodoV3RouteExecutorAdapter.sol/DodoV3RouteExecutorAdapter.json" ;;
    UniswapV3RouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/out/UniswapV3RouteExecutorAdapter.sol/UniswapV3RouteExecutorAdapter.json" ;;
    BalancerRouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/out/BalancerRouteExecutorAdapter.sol/BalancerRouteExecutorAdapter.json" ;;
    CurveRouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/out/CurveRouteExecutorAdapter.sol/CurveRouteExecutorAdapter.json" ;;
    OneInchRouteExecutorAdapter) printf '%s' "${SMOM_ROOT}/out/OneInchRouteExecutorAdapter.sol/OneInchRouteExecutorAdapter.json" ;;
    Chain138PilotUniswapV3Router|Chain138PilotBalancerVault|Chain138PilotCurve3Pool|Chain138PilotOneInchAggregationRouter)
      printf '%s' "${SMOM_ROOT}/out/Chain138PilotDexVenues.sol/${1}.json"
      ;;
    *) return 1 ;;
  esac
}
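# Advisory check: compare the local Foundry artifact's runtime bytecode against
# what is actually on chain; a mismatch usually predicts a failed verification.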
runtime_hash_report() {
  local label="$1"
  local addr="$2"
  local artifact_json artifact_runtime chain_runtime

  artifact_json="$(foundry_artifact_json_path "${label}")" || return 0
  [[ -f "${artifact_json}" ]] || return 0

  artifact_runtime="$(jq -r '.deployedBytecode.object // empty' "${artifact_json}" | tr '[:upper:]' '[:lower:]')"
  chain_runtime="$(cast code "${addr}" --rpc-url "${RPC_URL}" 2>/dev/null | tr -d '\n\r \t' | tr '[:upper:]' '[:lower:]')" || true

  [[ -n "${artifact_runtime}" && -n "${chain_runtime}" && "${chain_runtime}" != "0x" ]] || return 0

  if [[ "${artifact_runtime}" != "${chain_runtime}" ]]; then
    warn "${label}: Foundry artifact runtime bytecode does not match deployed bytecode."
    warn "${label}: artifact_keccak=$(cast keccak "${artifact_runtime}") chain_keccak=$(cast keccak "${chain_runtime}")"
  fi
}

submit_standard_input_from_artifact() {
  local label="$1"
  local addr="$2"
  local contract_path="$3"
  local constructor_args="$4"
  local dbg build input_file compiler_version evm_version optimization_runs optimization_enabled license_type response message

  dbg="$(artifact_dbg_path "${label}")" || fail "${label}: missing dbg path mapping"
  [[ -f "${dbg}" ]] || fail "${label}: missing dbg artifact ${dbg}"
  build="$(jq -r '.buildInfo // .build_info // empty' "${dbg}")"
  [[ -n "${build}" && "${build}" != "null" ]] || fail "${label}: missing build-info reference in ${dbg}"
  build="$(cd "$(dirname "${dbg}")" && realpath "${build}")"
  [[ -f "${build}" ]] || fail "${label}: missing build-info file ${build}"

  input_file="$(mktemp)"
  python3 - "${dbg}" "${build}" "${input_file}" "${contract_path%%:*}" <<'PY'
import json
import os
import posixpath
import re
import sys

dbg_path, build_path, out_path, fallback_source = sys.argv[1:5]
with open(dbg_path, "r", encoding="utf-8") as fh:
    dbg = json.load(fh)
with open(build_path, "r", encoding="utf-8") as fh:
    build = json.load(fh)

source_name = dbg.get("sourceName") or dbg.get("source_name") or fallback_source
if not source_name:
    raise SystemExit(f"missing sourceName in {dbg_path}")

input_data = build["input"]
sources = input_data.get("sources", {})
if source_name not in sources:
    raise SystemExit(f"source {source_name} missing from build-info input")

import_re = re.compile(r'import\s+(?:[^;]*?\s+from\s+)?["\']([^"\']+)["\']\s*;')

closure = set()
stack = [source_name]
while stack:
    current = stack.pop()
    if current in closure or current not in sources:
        continue
    closure.add(current)
    content = sources[current].get("content", "")
    for entry in import_re.findall(content):
        if entry.startswith("."):
            target = posixpath.normpath(posixpath.join(posixpath.dirname(current), entry))
        else:
            target = entry
        if target in sources and target not in closure:
            stack.append(target)

reduced = json.loads(json.dumps(input_data))
reduced["sources"] = {name: sources[name] for name in sorted(closure)}

with open(out_path, "w", encoding="utf-8") as fh:
    json.dump(reduced, fh, separators=(",", ":"))
PY
  compiler_version="$(jq -r '.solcLongVersion | "v" + .' "${build}")"
  evm_version="$(jq -r '.input.settings.evmVersion // "default"' "${build}")"
  optimization_runs="$(jq -r '.input.settings.optimizer.runs // 200' "${build}")"
  optimization_enabled="$(jq -r '.input.settings.optimizer.enabled // true' "${build}")"
  license_type="mit"

  response="$(
    curl --max-time 180 -fsS -X POST \
      -F "compiler_version=${compiler_version}" \
      -F "contract_name=${contract_path}" \
      -F "autodetect_constructor_args=false" \
      -F "constructor_args=${constructor_args}" \
      -F "optimization_runs=${optimization_runs}" \
      -F "is_optimization_enabled=${optimization_enabled}" \
      -F "evm_version=${evm_version}" \
      -F "license_type=${license_type}" \
      -F "files[0]=@${input_file};type=application/json" \
      "${BLOCKSCOUT_URL}/api/v2/smart-contracts/${addr}/verification/via/standard-input"
  )" || {
    rm -f "${input_file}"
    fail "${label}: Blockscout standard-input submission failed."
  }
  rm -f "${input_file}"

  message="$(jq -r '.message // empty' <<<"${response}")"
  if [[ "${message}" == "Smart-contract verification started" ]]; then
    ok "${label} standard-input verification submission accepted."
    return 0
  fi

  warn "${label} standard-input verification returned: ${response}"
  return 1
}

submit_standard_input_from_forge() {
  local label="$1"
  local addr="$2"
  local contract_path="$3"
  local constructor_args="$4"
  local input_file response message
  local compiler_version evm_version optimization_runs via_ir_flag artifact_json

  compiler_version="${ROUTE_STACK_SOLC_VERSION}"
  evm_version="${ROUTE_STACK_EVM_VERSION}"
  optimization_runs="${ROUTE_STACK_OPT_RUNS}"
  via_ir_flag=(--via-ir)

  artifact_json="$(foundry_artifact_json_path "${label}" || true)"
  if [[ -n "${artifact_json}" && -f "${artifact_json}" ]]; then
    compiler_version="v$(jq -r '.metadata.compiler.version // empty' "${artifact_json}")"
    evm_version="$(jq -r '.metadata.settings.evmVersion // "default"' "${artifact_json}")"
    optimization_runs="$(jq -r '.metadata.settings.optimizer.runs // 200' "${artifact_json}")"
    if [[ "$(jq -r '.metadata.settings.viaIR // false' "${artifact_json}")" != "true" ]]; then
      via_ir_flag=()
    fi
  fi

  runtime_hash_report "${label}" "${addr}"
  input_file="$(mktemp)"
  (
    cd "${SMOM_ROOT}"
    forge verify-contract "${addr}" "${contract_path}" \
      --chain-id 138 \
      --root . \
      --compiler-version "${compiler_version}" \
      --num-of-optimizations "${optimization_runs}" \
      "${via_ir_flag[@]}" \
      --evm-version "${evm_version}" \
      --show-standard-json-input >"${input_file}"
  ) || {
    rm -f "${input_file}"
    fail "${label}: failed to render Foundry standard-input from deployment sources."
  }

  response="$(
    curl --max-time 180 -fsS -X POST \
      -F "compiler_version=${compiler_version}" \
      -F "contract_name=${contract_path}" \
      -F "autodetect_constructor_args=false" \
      -F "constructor_args=${constructor_args}" \
      -F "optimization_runs=${optimization_runs}" \
      -F "is_optimization_enabled=true" \
      -F "evm_version=${evm_version}" \
      -F "license_type=mit" \
      -F "files[0]=@${input_file};type=application/json" \
      "${BLOCKSCOUT_URL}/api/v2/smart-contracts/${addr}/verification/via/standard-input"
  )" || {
    rm -f "${input_file}"
    fail "${label}: Blockscout Foundry standard-input submission failed."
  }
  rm -f "${input_file}"

  message="$(jq -r '.message // empty' <<<"${response}")"
  if [[ "${message}" == "Smart-contract verification started" ]]; then
    ok "${label} Foundry standard-input verification submission accepted."
    return 0
  fi

  warn "${label} Foundry standard-input verification returned: ${response}"
  return 1
}

submit_best_verification() {
  local label="$1"
  local addr="$2"
  local path="$3"
  local expected_name="$4"
  local constructor_sig="$5"
  shift 5
  local constructor_args=("$@")
  local encoded=""

  if [[ -n "${constructor_sig}" ]]; then
    encoded="$(cast abi-encode "${constructor_sig}" "${constructor_args[@]}")"
  fi

  # Prefer the Foundry deployment lineage for the route stack. The earlier
  # Hardhat dbg/build-info path drifted away from the actual deployed compiler/EVM
  # settings and is kept only as a compatibility fallback.
  if submit_standard_input_from_forge "${label}" "${addr}" "${path}" "${encoded}"; then
    return 0
  fi

  if artifact_dbg_path "${label}" >/dev/null 2>&1; then
    warn "${label}: falling back to artifact-derived standard-input after Foundry mismatch."
    submit_standard_input_from_artifact "${label}" "${addr}" "${path}" "${encoded}" || return 1
    return 0
  fi

  warn "${label}: falling back to legacy Forge flattened verification path."
  submit_verification "${label}" "${addr}" "${path}" "${expected_name}" "${constructor_sig}" "${constructor_args[@]}"
}

WETH="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
USDT="0x004b63A7B5b0E06f6bB6adb4a5F9f590BF3182D1"
USDC="0x71D6687F38b93CCad569Fa6352c876eea967201b"
DAI_PLACEHOLDER="0x6B175474E89094C44Da98b954EedeAC495271d0F"

ROUTER_V2="$(jq -r '.chains["138"].contracts.EnhancedSwapRouterV2' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
COORDINATOR_V2="$(jq -r '.chains["138"].contracts.IntentBridgeCoordinatorV2' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
DODO_ADAPTER="$(jq -r '.chains["138"].contracts.DodoRouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
DODO_V3_ADAPTER="$(jq -r '.chains["138"].contracts.DodoV3RouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
UNISWAP_V3_ADAPTER="$(jq -r '.chains["138"].contracts.UniswapV3RouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
BALANCER_ADAPTER="$(jq -r '.chains["138"].contracts.BalancerRouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
CURVE_ADAPTER="$(jq -r '.chains["138"].contracts.CurveRouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
ONEINCH_ADAPTER="$(jq -r '.chains["138"].contracts.OneInchRouteExecutorAdapter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
PILOT_UNISWAP="$(jq -r '.chains["138"].contracts.PilotUniswapV3Router' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
PILOT_BALANCER="$(jq -r '.chains["138"].contracts.PilotBalancerVault' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
PILOT_CURVE="$(jq -r '.chains["138"].contracts.PilotCurve3Pool' "${PROJECT_ROOT}/config/smart-contracts-master.json")"
PILOT_ONEINCH="$(jq -r '.chains["138"].contracts.PilotOneInchRouter' "${PROJECT_ROOT}/config/smart-contracts-master.json")"

log "Chain 138 route execution stack Blockscout verification"
log "RPC: ${RPC_URL}"
log "Explorer API: ${BLOCKSCOUT_API_BASE}"
log

if (( STATUS_ONLY )); then
  for pair in \
    "EnhancedSwapRouterV2:${ROUTER_V2}:EnhancedSwapRouterV2" \
    "IntentBridgeCoordinatorV2:${COORDINATOR_V2}:IntentBridgeCoordinatorV2" \
    "DodoRouteExecutorAdapter:${DODO_ADAPTER}:DodoRouteExecutorAdapter" \
    "DodoV3RouteExecutorAdapter:${DODO_V3_ADAPTER}:DodoV3RouteExecutorAdapter" \
    "UniswapV3RouteExecutorAdapter:${UNISWAP_V3_ADAPTER}:UniswapV3RouteExecutorAdapter" \
    "BalancerRouteExecutorAdapter:${BALANCER_ADAPTER}:BalancerRouteExecutorAdapter" \
    "CurveRouteExecutorAdapter:${CURVE_ADAPTER}:CurveRouteExecutorAdapter" \
    "OneInchRouteExecutorAdapter:${ONEINCH_ADAPTER}:OneInchRouteExecutorAdapter" \
    "Chain138PilotUniswapV3Router:${PILOT_UNISWAP}:Chain138PilotUniswapV3Router" \
    "Chain138PilotBalancerVault:${PILOT_BALANCER}:Chain138PilotBalancerVault" \
    "Chain138PilotCurve3Pool:${PILOT_CURVE}:Chain138PilotCurve3Pool" \
    "Chain138PilotOneInchAggregationRouter:${PILOT_ONEINCH}:Chain138PilotOneInchAggregationRouter"
  do
    IFS=":" read -r label addr expected <<<"${pair}"
    should_handle "${label}" || continue
    if is_verified "${addr}" "${expected}"; then
      ok "${label} already verified on Blockscout."
    else
      warn "${label} not yet verified on Blockscout."
    fi
  done
  exit 0
fi

should_handle "EnhancedSwapRouterV2" && submit_best_verification \
  "EnhancedSwapRouterV2" \
  "${ROUTER_V2}" \
  "contracts/bridge/trustless/EnhancedSwapRouterV2.sol:EnhancedSwapRouterV2" \
  "EnhancedSwapRouterV2" \
  "constructor(address,address,address,address)" \
  "${WETH}" "${USDT}" "${USDC}" "${DAI_PLACEHOLDER}"

should_handle "IntentBridgeCoordinatorV2" && submit_best_verification \
  "IntentBridgeCoordinatorV2" \
  "${COORDINATOR_V2}" \
  "contracts/bridge/trustless/IntentBridgeCoordinatorV2.sol:IntentBridgeCoordinatorV2" \
  "IntentBridgeCoordinatorV2" \
  "constructor(address)" \
  "${ROUTER_V2}"

should_handle "DodoRouteExecutorAdapter" && submit_best_verification \
  "DodoRouteExecutorAdapter" \
  "${DODO_ADAPTER}" \
  "contracts/bridge/trustless/adapters/DodoRouteExecutorAdapter.sol:DodoRouteExecutorAdapter" \
  "DodoRouteExecutorAdapter" \
  ""

should_handle "DodoV3RouteExecutorAdapter" && submit_best_verification \
  "DodoV3RouteExecutorAdapter" \
  "${DODO_V3_ADAPTER}" \
  "contracts/bridge/trustless/adapters/DodoV3RouteExecutorAdapter.sol:DodoV3RouteExecutorAdapter" \
  "DodoV3RouteExecutorAdapter" \
  ""

should_handle "UniswapV3RouteExecutorAdapter" && submit_best_verification \
  "UniswapV3RouteExecutorAdapter" \
  "${UNISWAP_V3_ADAPTER}" \
  "contracts/bridge/trustless/adapters/UniswapV3RouteExecutorAdapter.sol:UniswapV3RouteExecutorAdapter" \
  "UniswapV3RouteExecutorAdapter" \
  ""

should_handle "BalancerRouteExecutorAdapter" && submit_best_verification \
  "BalancerRouteExecutorAdapter" \
  "${BALANCER_ADAPTER}" \
  "contracts/bridge/trustless/adapters/BalancerRouteExecutorAdapter.sol:BalancerRouteExecutorAdapter" \
  "BalancerRouteExecutorAdapter" \
  ""

should_handle "CurveRouteExecutorAdapter" && submit_best_verification \
  "CurveRouteExecutorAdapter" \
  "${CURVE_ADAPTER}" \
  "contracts/bridge/trustless/adapters/CurveRouteExecutorAdapter.sol:CurveRouteExecutorAdapter" \
  "CurveRouteExecutorAdapter" \
  ""

should_handle "OneInchRouteExecutorAdapter" && submit_best_verification \
  "OneInchRouteExecutorAdapter" \
  "${ONEINCH_ADAPTER}" \
  "contracts/bridge/trustless/adapters/OneInchRouteExecutorAdapter.sol:OneInchRouteExecutorAdapter" \
  "OneInchRouteExecutorAdapter" \
  ""

should_handle "Chain138PilotUniswapV3Router" && submit_best_verification \
  "Chain138PilotUniswapV3Router" \
  "${PILOT_UNISWAP}" \
  "contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol:Chain138PilotUniswapV3Router" \
  "Chain138PilotUniswapV3Router" \
  ""

should_handle "Chain138PilotBalancerVault" && submit_best_verification \
  "Chain138PilotBalancerVault" \
  "${PILOT_BALANCER}" \
  "contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol:Chain138PilotBalancerVault" \
  "Chain138PilotBalancerVault" \
  ""

should_handle "Chain138PilotCurve3Pool" && submit_best_verification \
  "Chain138PilotCurve3Pool" \
  "${PILOT_CURVE}" \
  "contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol:Chain138PilotCurve3Pool" \
  "Chain138PilotCurve3Pool" \
  "constructor(address,address,address,uint256)" \
  "${USDT}" "${USDC}" "0x0000000000000000000000000000000000000000" "4"

should_handle "Chain138PilotOneInchAggregationRouter" && submit_best_verification \
  "Chain138PilotOneInchAggregationRouter" \
  "${PILOT_ONEINCH}" \
  "contracts/bridge/trustless/pilot/Chain138PilotDexVenues.sol:Chain138PilotOneInchAggregationRouter" \
  "Chain138PilotOneInchAggregationRouter" \
  ""

if (( NO_WAIT )); then
  log
  ok "Chain 138 route execution stack verification submissions complete."
  exit 0
fi

log
ok "Chain 138 route execution stack verification flow complete."
@@ -273,16 +273,9 @@ submit_v2_standard_input() {

  compiler_version="v0.8.16+commit.07a7930e"
  license_type="busl_1_1"
  input_file="$(mktemp)"

  jq '
    del(.settings.libraries[""].__CACHE_BREAKER__)
    | if (.settings.libraries[""]? == {}) then del(.settings.libraries[""]) else . end
    | if (.settings.libraries? == {}) then del(.settings.libraries) else . end
  ' "${input}" > "${input_file}"

  response="$(
    curl --max-time 120 -fsS -X POST \
    curl --max-time 30 -fsS -X POST \
      -F "compiler_version=${compiler_version}" \
      -F "contract_name=${path}" \
      -F "autodetect_constructor_args=false" \
@@ -290,13 +283,9 @@ submit_v2_standard_input() {
      -F "optimization_runs=200" \
      -F "is_optimization_enabled=true" \
      -F "license_type=${license_type}" \
      -F "files[0]=@${input_file};type=application/json" \
      -F "files[0]=@${input};type=application/json" \
      "${BLOCKSCOUT_URL}/api/v2/smart-contracts/${addr}/verification/via/standard-input"
  )" || {
    rm -f "${input_file}"
    fail "${label}: Blockscout standard-input submission failed."
  }
  rm -f "${input_file}"
  )" || fail "${label}: Blockscout standard-input submission failed."

  message="$(jq -r '.message // empty' <<<"${response}")"
  if [[ "${message}" == "Smart-contract verification started" ]]; then