Compare commits
1 commit
main...feat/gru-v
| Author | SHA1 | Date |
|---|---|---|
|  | c840c0f777 |  |
@@ -6,10 +6,6 @@
2. Make changes, ensure tests pass
3. Open a pull request

Deploy workflow policy:

`main` and `master` are both deploy-triggering branches, so `.gitea/workflow-sources/deploy-to-phoenix.yml` and `.gitea/workflow-sources/validate-on-pr.yml` must stay identical across both branches.

Use `bash scripts/verify/sync-gitea-workflows.sh` after editing workflow-source files, and `bash scripts/verify/run-all-validation.sh --skip-genesis` to catch workflow drift before push.
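Run together, those two commands form the usual pre-push gate; this is only a sketch of the sequence already named above, not a new script:

```bash
# Regenerate the checked-in workflow copies from the canonical sources,
# then run the no-LAN validation gate that catches workflow drift.
bash scripts/verify/sync-gitea-workflows.sh
bash scripts/verify/run-all-validation.sh --skip-genesis
```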
## Pull Requests

- Use the PR template when opening a PR
@@ -1,125 +0,0 @@
# Canonical deploy workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
name: Deploy to Phoenix

on:
  push:
    branches: [main, master]
  workflow_dispatch:

jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Fetch deploy branches for workflow parity check
        run: |
          REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
          if git remote | grep -qx gitea; then
            REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
          fi
          git fetch --depth=1 "$REMOTE" main master

      - name: Install validation dependencies
        run: |
          corepack enable
          pnpm install --frozen-lockfile

      # The cW* mesh matrix and deployment-status validators read
      # cross-chain-pmm-lps/config/*.json. The parent checkout does not
      # materialize submodules by default, and .gitmodules mixes public HTTPS
      # with SSH URLs, so clone only the required public validation dependency.
      - name: Materialize cross-chain-pmm-lps
        run: |
          set -euo pipefail
          if [ ! -f cross-chain-pmm-lps/config/deployment-status.json ]; then
            rm -rf cross-chain-pmm-lps
            git clone --depth=1 \
              https://gitea.d-bis.org/d-bis/cross-chain-pmm-lps.git \
              cross-chain-pmm-lps
          fi

      - name: Run repo validation gate
        run: |
          bash scripts/verify/run-all-validation.sh --skip-genesis

  deploy:
    needs: validate
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Trigger Phoenix deployment
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          set +e
          curl -sSf --retry 3 --retry-connrefused --retry-delay 10 --retry-max-time 180 \
            --connect-timeout 10 --max-time 120 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"default\"}"
          rc="$?"
          set -e
          if [ "$rc" -eq 52 ]; then
            HEALTH_URL="${{ secrets.PHOENIX_DEPLOY_URL }}"
            HEALTH_URL="${HEALTH_URL%/api/deploy}/health"
            echo "Phoenix deploy API restarted during self-deploy; verifying ${HEALTH_URL}"
            for i in $(seq 1 12); do
              if curl -fsS --max-time 5 "$HEALTH_URL"; then
                exit 0
              fi
              sleep 5
            done
          fi
          exit "$rc"

  deploy-atomic-swap-dapp:
    needs: deploy
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Trigger Atomic Swap dApp deployment (Phoenix)
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          curl -sSf \
            --connect-timeout 10 --max-time 900 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"atomic-swap-dapp-live\"}"

  # After app deploy, ask Phoenix to run path-gated Cloudflare DNS sync on the host that has
  # PHOENIX_REPO_ROOT + .env (not on this runner). Skips unless PHOENIX_CLOUDFLARE_SYNC=1 on that host.
  # continue-on-error: first-time or missing opt-in should not block the main deploy.
  cloudflare:
    needs:
      - deploy
      - deploy-atomic-swap-dapp
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Request Cloudflare DNS sync (Phoenix)
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          curl -sSf --retry 5 --retry-all-errors --retry-connrefused --retry-delay 10 --retry-max-time 300 \
            --connect-timeout 10 --max-time 120 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"cloudflare-sync\"}" \
            || { echo "Cloudflare DNS sync request failed; optional sync is non-blocking."; exit 0; }
@@ -1,33 +0,0 @@
# Canonical PR validation workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
# PR-only: push validation already runs in deploy-to-phoenix.yml; this gives PRs the same
# no-LAN checks without the deploy job (and without deploy secrets).
name: Validate (PR)
on:
  pull_request:
    types: [opened, synchronize, reopened]
    branches: [main, master]
  workflow_dispatch:
jobs:
  run-all-validation:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Fetch deploy branches for workflow parity check
        run: |
          REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
          if git remote | grep -qx gitea; then
            REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
          fi
          git fetch --depth=1 "$REMOTE" main master
      - name: Install validation dependencies
        run: |
          corepack enable
          pnpm install --frozen-lockfile
      # Optional: set org/repo variable URA_STRICT_CLOSURE=1 to fail PRs while pilot placeholders
      # remain in manifest (see scripts/ura/validate-manifest-closure.mjs). Not enabled by default.
      - name: run-all-validation (no LAN, no genesis)
        env:
          URA_STRICT_CLOSURE: ${{ vars.URA_STRICT_CLOSURE }}
        run: bash scripts/verify/run-all-validation.sh --skip-genesis
@@ -1,52 +1,11 @@
# Canonical deploy workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
name: Deploy to Phoenix

on:
  push:
    branches: [main, master]
  workflow_dispatch:

jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Fetch deploy branches for workflow parity check
        run: |
          REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
          if git remote | grep -qx gitea; then
            REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
          fi
          git fetch --depth=1 "$REMOTE" main master

      - name: Install validation dependencies
        run: |
          corepack enable
          pnpm install --frozen-lockfile

      # The cW* mesh matrix and deployment-status validators read
      # cross-chain-pmm-lps/config/*.json. The parent checkout does not
      # materialize submodules by default, and .gitmodules mixes public HTTPS
      # with SSH URLs, so clone only the required public validation dependency.
      - name: Materialize cross-chain-pmm-lps
        run: |
          set -euo pipefail
          if [ ! -f cross-chain-pmm-lps/config/deployment-status.json ]; then
            rm -rf cross-chain-pmm-lps
            git clone --depth=1 \
              https://gitea.d-bis.org/d-bis/cross-chain-pmm-lps.git \
              cross-chain-pmm-lps
          fi

      - name: Run repo validation gate
        run: |
          bash scripts/verify/run-all-validation.sh --skip-genesis

  deploy:
    needs: validate
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
@@ -54,72 +13,8 @@ jobs:

      - name: Trigger Phoenix deployment
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          set +e
          curl -sSf --retry 3 --retry-connrefused --retry-delay 10 --retry-max-time 180 \
            --connect-timeout 10 --max-time 120 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
          curl -sSf -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"default\"}"
          rc="$?"
          set -e
          if [ "$rc" -eq 52 ]; then
            HEALTH_URL="${{ secrets.PHOENIX_DEPLOY_URL }}"
            HEALTH_URL="${HEALTH_URL%/api/deploy}/health"
            echo "Phoenix deploy API restarted during self-deploy; verifying ${HEALTH_URL}"
            for i in $(seq 1 12); do
              if curl -fsS --max-time 5 "$HEALTH_URL"; then
                exit 0
              fi
              sleep 5
            done
          fi
          exit "$rc"

  deploy-atomic-swap-dapp:
    needs: deploy
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Trigger Atomic Swap dApp deployment (Phoenix)
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          curl -sSf \
            --connect-timeout 10 --max-time 900 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"atomic-swap-dapp-live\"}"

  # After app deploy, ask Phoenix to run path-gated Cloudflare DNS sync on the host that has
  # PHOENIX_REPO_ROOT + .env (not on this runner). Skips unless PHOENIX_CLOUDFLARE_SYNC=1 on that host.
  # continue-on-error: first-time or missing opt-in should not block the main deploy.
  cloudflare:
    needs:
      - deploy
      - deploy-atomic-swap-dapp
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Request Cloudflare DNS sync (Phoenix)
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"
          BRANCH="$(git rev-parse --abbrev-ref HEAD)"
          curl -sSf --retry 5 --retry-all-errors --retry-connrefused --retry-delay 10 --retry-max-time 300 \
            --connect-timeout 10 --max-time 120 \
            -X POST "${{ secrets.PHOENIX_DEPLOY_URL }}" \
            -H "Authorization: Bearer ${{ secrets.PHOENIX_DEPLOY_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${SHA}\",\"branch\":\"${BRANCH}\",\"target\":\"cloudflare-sync\"}" \
            || { echo "Cloudflare DNS sync request failed; optional sync is non-blocking."; exit 0; }
          -d "{\"repo\":\"${{ gitea.repository }}\",\"sha\":\"${{ gitea.sha }}\",\"branch\":\"${{ gitea.ref_name }}\"}"
        continue-on-error: true
@@ -1,33 +0,0 @@
# Canonical PR validation workflow. Keep source and checked-in workflow copies byte-identical.
# Validation checks both file sync and main/master parity.
# PR-only: push validation already runs in deploy-to-phoenix.yml; this gives PRs the same
# no-LAN checks without the deploy job (and without deploy secrets).
name: Validate (PR)
on:
  pull_request:
    types: [opened, synchronize, reopened]
    branches: [main, master]
  workflow_dispatch:
jobs:
  run-all-validation:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Fetch deploy branches for workflow parity check
        run: |
          REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
          if git remote | grep -qx gitea; then
            REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
          fi
          git fetch --depth=1 "$REMOTE" main master
      - name: Install validation dependencies
        run: |
          corepack enable
          pnpm install --frozen-lockfile
      # Optional: set org/repo variable URA_STRICT_CLOSURE=1 to fail PRs while pilot placeholders
      # remain in manifest (see scripts/ura/validate-manifest-closure.mjs). Not enabled by default.
      - name: run-all-validation (no LAN, no genesis)
        env:
          URA_STRICT_CLOSURE: ${{ vars.URA_STRICT_CLOSURE }}
        run: bash scripts/verify/run-all-validation.sh --skip-genesis
@@ -2076,10 +2076,10 @@
"baseSymbol": "cWETH",
"quoteSymbol": "USDC",
"poolAddress": "0xd012000000000000000000000000000000000001",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_mainnet",
"venue": "dodo_pmm",
@@ -2091,10 +2091,10 @@
"baseSymbol": "cWETH",
"quoteSymbol": "WETH",
"poolAddress": "0xd011000000000000000000000000000000000001",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_mainnet",
"venue": "dodo_pmm",
@@ -2150,10 +2150,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "USDC",
"poolAddress": "0xd02200000000000000000000000000000000000a",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2165,10 +2165,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "WETH",
"poolAddress": "0xd02100000000000000000000000000000000000a",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2246,10 +2246,10 @@
"baseSymbol": "cWXDAI",
"quoteSymbol": "USDC",
"poolAddress": "0xd072000000000000000000000000000000000064",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "xdai",
"venue": "dodo_pmm",
@@ -2261,10 +2261,10 @@
"baseSymbol": "cWXDAI",
"quoteSymbol": "WXDAI",
"poolAddress": "0xd071000000000000000000000000000000000064",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "xdai",
"venue": "dodo_pmm",
@@ -2276,10 +2276,10 @@
"baseSymbol": "cWWEMIX",
"quoteSymbol": "USDC",
"poolAddress": "0xd092000000000000000000000000000000000457",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "wemix",
"venue": "dodo_pmm",
@@ -2291,10 +2291,10 @@
"baseSymbol": "cWWEMIX",
"quoteSymbol": "WWEMIX",
"poolAddress": "0xd091000000000000000000000000000000000457",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "wemix",
"venue": "dodo_pmm",
@@ -2339,10 +2339,10 @@
"baseSymbol": "cWPOL",
"quoteSymbol": "USDC",
"poolAddress": "0xd042000000000000000000000000000000000089",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "pol",
"venue": "dodo_pmm",
@@ -2354,10 +2354,10 @@
"baseSymbol": "cWPOL",
"quoteSymbol": "WPOL",
"poolAddress": "0xd041000000000000000000000000000000000089",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "pol",
"venue": "dodo_pmm",
@@ -2413,10 +2413,10 @@
"baseSymbol": "cWCRO",
"quoteSymbol": "USDT",
"poolAddress": "0xd062000000000000000000000000000000000019",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "cro",
"venue": "dodo_pmm",
@@ -2428,10 +2428,10 @@
"baseSymbol": "cWCRO",
"quoteSymbol": "WCRO",
"poolAddress": "0xd061000000000000000000000000000000000019",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "cro",
"venue": "dodo_pmm",
@@ -2487,10 +2487,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "USDC",
"poolAddress": "0xd02200000000000000000000000000000000a4b1",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2502,10 +2502,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "WETH",
"poolAddress": "0xd02100000000000000000000000000000000a4b1",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2572,10 +2572,10 @@
"baseSymbol": "cWCELO",
"quoteSymbol": "USDC",
"poolAddress": "0xd08200000000000000000000000000000000a4ec",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "celo",
"venue": "dodo_pmm",
@@ -2587,10 +2587,10 @@
"baseSymbol": "cWCELO",
"quoteSymbol": "WCELO",
"poolAddress": "0xd08100000000000000000000000000000000a4ec",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "celo",
"venue": "dodo_pmm",
@@ -2635,10 +2635,10 @@
"baseSymbol": "cWAVAX",
"quoteSymbol": "USDC",
"poolAddress": "0xd05200000000000000000000000000000000a86a",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "avax",
"venue": "dodo_pmm",
@@ -2650,10 +2650,10 @@
"baseSymbol": "cWAVAX",
"quoteSymbol": "WAVAX",
"poolAddress": "0xd05100000000000000000000000000000000a86a",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "avax",
"venue": "dodo_pmm",
@@ -2720,10 +2720,10 @@
"baseSymbol": "cWBNB",
"quoteSymbol": "USDT",
"poolAddress": "0xd032000000000000000000000000000000000038",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "bnb",
"venue": "dodo_pmm",
@@ -2735,10 +2735,10 @@
"baseSymbol": "cWBNB",
"quoteSymbol": "WBNB",
"poolAddress": "0xd031000000000000000000000000000000000038",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "bnb",
"venue": "dodo_pmm",
@@ -2816,10 +2816,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "USDC",
"poolAddress": "0xd022000000000000000000000000000000002105",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
@@ -2831,10 +2831,10 @@
"baseSymbol": "cWETHL2",
"quoteSymbol": "WETH",
"poolAddress": "0xd021000000000000000000000000000000002105",
"active": false,
"routingEnabled": false,
"mcpVisible": false,
"phase": "wave1-staged",
"active": true,
"routingEnabled": true,
"mcpVisible": true,
"phase": "wave1",
"assetClass": "gas_native",
"familyKey": "eth_l2",
"venue": "dodo_pmm",
config/gru-v2-full-mesh-master-matrix.json (new file, 133 lines)
@@ -0,0 +1,133 @@
{
  "statusDate": "2026-04-14",
  "namespaces": {
    "chain138": "c* V2",
    "allMainnet651940": "cA*",
    "publicConnectedNetworks": "cW*"
  },
  "executionPhases": [
    {
      "id": "P0",
      "namespace": "c* V2",
      "scope": "Chain 138 canonical hub and Wave 1"
    },
    {
      "id": "P1",
      "namespace": "c* V2",
      "scope": "Chain 138 cross-links and gas-native hubs"
    },
    {
      "id": "P2",
      "namespace": "cA*",
      "scope": "ALL Mainnet canonical hub and Wave 1"
    },
    {
      "id": "P3",
      "namespace": "cA*",
      "scope": "ALL Mainnet cross-links and gas-native hubs"
    },
    {
      "id": "P4",
      "namespace": "cW*",
      "scope": "Public cW stable hub, Wave 1, and gas-native mesh"
    },
    {
      "id": "P5",
      "namespace": "all",
      "scope": "Spot venue protocol completion"
    },
    {
      "id": "P6",
      "namespace": "all",
      "scope": "Aggregator, reserve, and market protocol completion"
    },
    {
      "id": "P7",
      "namespace": "all",
      "scope": "MEV completion"
    }
  ],
  "protocolsRequired": [
    "DODO",
    "Uniswap v3",
    "Uniswap v2",
    "SushiSwap",
    "Curve",
    "Balancer",
    "1Inch",
    "Aave",
    "GMX",
    "dYdX"
  ],
  "chain138CanonicalPools": [
    "cUSDT V2 / cUSDC V2",
    "cUSDT V2 / USDT",
    "cUSDC V2 / USDC",
    "cEURC V2 / cUSDC V2",
    "cEURT V2 / cUSDC V2",
    "cGBPC V2 / cUSDC V2",
    "cGBPT V2 / cUSDC V2",
    "cAUDC V2 / cUSDC V2",
    "cJPYC V2 / cUSDC V2",
    "cCHFC V2 / cUSDC V2",
    "cCADC V2 / cUSDC V2",
    "cXAUC V2 / cUSDC V2",
    "cXAUT V2 / cUSDC V2",
    "cEURC V2 / cEURT V2",
    "cGBPC V2 / cGBPT V2",
    "cXAUC V2 / cXAUT V2",
    "cETH / WETH",
    "cETH / cUSDC V2",
    "cETHL2 / cUSDC V2",
    "cBNB / cUSDC V2",
    "cPOL / cUSDC V2",
    "cAVAX / cUSDC V2",
    "cCRO / cUSDC V2",
    "cXDAI / cUSDC V2",
    "cCELO / cUSDC V2",
    "cWEMIX / cUSDC V2"
  ],
  "allMainnetCanonicalPools": [
    "cAUSDT / cAUSDC",
    "cAUSDT / AUSDT",
    "cAUSDC / USDC",
    "cAEURC / cAUSDC",
    "cAEURT / cAUSDC",
    "cAGBPC / cAUSDC",
    "cAGBPT / cAUSDC",
    "cAAUDC / cAUSDC",
    "cAJPYC / cAUSDC",
    "cACHFC / cAUSDC",
    "cACADC / cAUSDC",
    "cAXAUC / cAUSDC",
    "cAXAUT / cAUSDC",
    "cAEURC / cAEURT",
    "cAGBPC / cAGBPT",
    "cAXAUC / cAXAUT",
    "cAETH / WETH",
    "cAETH / cAUSDC",
    "cAWALL / WALL",
    "cAWALL / cAUSDC"
  ],
  "publicMeshTemplate": {
    "stableHub": [
      "cWUSDT / USDC",
      "cWUSDC / USDC",
      "cWUSDT / USDT",
      "cWUSDC / USDT",
      "cWUSDT / cWUSDC"
    ],
    "wave1VsUsdc": [
      "cWEURC / USDC",
      "cWEURT / USDC",
      "cWGBPC / USDC",
      "cWGBPT / USDC",
      "cWAUDC / USDC",
      "cWJPYC / USDC",
      "cWCHFC / USDC",
      "cWCADC / USDC",
      "cWXAUC / USDC",
      "cWXAUT / USDC"
    ]
  }
}
config/gru-v2-full-mesh-pool-tracker.schema.json (new file, 82 lines)
@@ -0,0 +1,82 @@
{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "$id": "https://d-bis.org/schemas/gru-v2-full-mesh-pool-tracker.json",
  "title": "GRU v2 Full Mesh Pool Tracker",
  "type": "object",
  "required": [
    "statusDate",
    "defaultFields",
    "chain138",
    "allMainnet651940",
    "publicMesh"
  ],
  "properties": {
    "statusDate": {
      "type": "string",
      "pattern": "^\\d{4}-\\d{2}-\\d{2}$"
    },
    "defaultFields": {
      "type": "object",
      "required": ["status", "deployed", "seeded", "validated", "live", "mevReady"],
      "properties": {
        "status": {
          "type": "string",
          "enum": ["todo", "in_progress", "blocked", "done"]
        },
        "deployed": { "type": "boolean" },
        "seeded": { "type": "boolean" },
        "validated": { "type": "boolean" },
        "live": { "type": "boolean" },
        "mevReady": { "type": "boolean" }
      },
      "additionalProperties": false
    },
    "chain138": { "$ref": "#/$defs/namedBucket" },
    "allMainnet651940": { "$ref": "#/$defs/namedBucket" },
    "publicMesh": {
      "type": "object",
      "minProperties": 1,
      "additionalProperties": { "$ref": "#/$defs/meshBucket" }
    }
  },
  "$defs": {
    "pairEntry": {
      "type": "object",
      "required": ["pair"],
      "properties": {
        "pair": { "type": "string", "minLength": 3 },
        "priority": { "type": "string", "minLength": 2 }
      },
      "additionalProperties": false
    },
    "namedBucket": {
      "type": "object",
      "required": ["namespace", "entries"],
      "properties": {
        "namespace": { "type": "string", "minLength": 2 },
        "entries": {
          "type": "array",
          "items": { "$ref": "#/$defs/pairEntry" }
        }
      },
      "additionalProperties": false
    },
    "meshBucket": {
      "type": "object",
      "required": ["namespace", "entries"],
      "properties": {
        "namespace": { "type": "string", "minLength": 2 },
        "entries": {
          "type": "array",
          "items": { "type": "string", "minLength": 3 }
        },
        "statusOverride": {
          "type": "string",
          "enum": ["planned", "todo", "in_progress", "blocked", "done"]
        }
      },
      "additionalProperties": false
    }
  },
  "additionalProperties": false
}
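For orientation, a minimal tracker document that would validate against this schema might look like the sketch below; the pair strings are drawn from the master matrix above, while the `chain1` key under `publicMesh` is a hypothetical bucket name chosen only for illustration.

```json
{
  "statusDate": "2026-04-14",
  "defaultFields": {
    "status": "todo",
    "deployed": false,
    "seeded": false,
    "validated": false,
    "live": false,
    "mevReady": false
  },
  "chain138": {
    "namespace": "c* V2",
    "entries": [
      { "pair": "cUSDT V2 / cUSDC V2", "priority": "P0" }
    ]
  },
  "allMainnet651940": {
    "namespace": "cA*",
    "entries": [
      { "pair": "cAUSDT / cAUSDC", "priority": "P0" }
    ]
  },
  "publicMesh": {
    "chain1": {
      "namespace": "cW*",
      "entries": ["cWUSDT / USDC"],
      "statusOverride": "planned"
    }
  }
}
```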
@@ -1936,7 +1936,7 @@
"key": "Compliant_WEMIX_cW",
"name": "cWEMIX->cWWEMIX",
"addressFrom": "0x4d82206bec5b4dfa17759ffede07e35f4f63a050",
"addressTo": "0x4c38f9a5ed68a04cd28a72e8c68c459ec34576f3",
"addressTo": "0xc111000000000000000000000000000000000457",
"notes": "Wave 1 gas-family lane wemix: Chain 138 cWEMIX -> Wemix cWWEMIX. hybrid_cap backing with uniswap_v3 reference pricing and DODO PMM edge liquidity."
}
]
@@ -1,217 +0,0 @@
# Devin → Gitea → Proxmox CI/CD

**Status:** Working baseline for this repo
**Last Updated:** 2026-04-20

## Goal

Create a repeatable path where:

1. Devin lands code in Gitea.
2. Gitea Actions validates the repo on the site-wide `act_runner`.
3. A successful workflow calls `phoenix-deploy-api`.
4. `phoenix-deploy-api` resolves the repo/branch to a deploy target and runs the matching Proxmox publish command.
5. The deploy service checks the target health URL before it reports success.

## Current baseline in this repo

The path now exists for **`d-bis/proxmox`** on **`main`** and **`master`**:

- Canonical workflow sources: [.gitea/workflow-sources/deploy-to-phoenix.yml](/home/intlc/projects/proxmox/.gitea/workflow-sources/deploy-to-phoenix.yml) and [.gitea/workflow-sources/validate-on-pr.yml](/home/intlc/projects/proxmox/.gitea/workflow-sources/validate-on-pr.yml)
- Workflow: [deploy-to-phoenix.yml](/home/intlc/projects/proxmox/.gitea/workflows/deploy-to-phoenix.yml)
- Manual app workflow: [deploy-portal-live.yml](/home/intlc/projects/proxmox/.gitea/workflows/deploy-portal-live.yml)
- Deploy service: [server.js](/home/intlc/projects/proxmox/phoenix-deploy-api/server.js)
- Target map: [deploy-targets.json](/home/intlc/projects/proxmox/phoenix-deploy-api/deploy-targets.json)
- Current live publish script: [deploy-phoenix-deploy-api-to-dev-vm.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh)
- Manual smoke trigger: [trigger-phoenix-deploy.sh](/home/intlc/projects/proxmox/scripts/dev-vm/trigger-phoenix-deploy.sh)
- Target validator: [validate-phoenix-deploy-targets.sh](/home/intlc/projects/proxmox/scripts/validation/validate-phoenix-deploy-targets.sh)
- Bootstrap helper: [bootstrap-phoenix-cicd.sh](/home/intlc/projects/proxmox/scripts/dev-vm/bootstrap-phoenix-cicd.sh)

That default target publishes the `phoenix-deploy-api` bundle to **VMID 5700** on the correct Proxmox node and starts the CT if needed.

A second target is now available:

- `portal-live` → runs [sync-sankofa-portal-7801.sh](/home/intlc/projects/proxmox/scripts/deployment/sync-sankofa-portal-7801.sh) and then checks `http://192.168.11.51:3000/`

## Workflow lockstep

Because both `main` and `master` can trigger deploys, deploy behavior is now defined from canonical source files and checked for branch parity.

- Edit only the source files under [.gitea/workflow-sources](/home/intlc/projects/proxmox/.gitea/workflow-sources:1)
- Sync the checked-in workflow copies with:

```bash
bash scripts/verify/sync-gitea-workflows.sh
```

- Validate source sync plus `main`/`master` parity with:

```bash
bash scripts/verify/run-all-validation.sh --skip-genesis
```

The deploy and PR workflows both fetch `origin/main` and `origin/master` before validation, so branch drift now fails CI instead of silently changing deploy behavior.

## Flow

```text
Devin
-> push to Gitea
-> Gitea Actions on act_runner (5700)
-> bash scripts/verify/run-all-validation.sh --skip-genesis
-> validates deploy-targets.json structure
-> POST /api/deploy to phoenix-deploy-api
-> match repo + branch + target in deploy-targets.json
-> run deploy command
-> verify target health URL
-> update Gitea commit status success/failure
```

## Required setup

### 1. Runner

Bring up the site-wide Gitea runner on VMID **5700**:

```bash
bash scripts/dev-vm/bootstrap-gitea-act-runner-site-wide.sh
```

Reference: [GITEA_ACT_RUNNER_SETUP.md](GITEA_ACT_RUNNER_SETUP.md)

### 0. One-command bootstrap

If root `.env` already contains the needed values, use:

```bash
bash scripts/dev-vm/bootstrap-phoenix-cicd.sh --repo d-bis/proxmox
```

This runs the validation gate, deploys `phoenix-deploy-api`, and smoke-checks the service.

### 2. Deploy API service

Deploy the API to the dev VM:

```bash
./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --dry-run
./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --apply --start-ct
```

On the target VM, set at least:

```bash
PORT=4001
GITEA_URL=https://gitea.d-bis.org
GITEA_TOKEN=<token with repo status access>
PHOENIX_DEPLOY_SECRET=<shared secret>
PHOENIX_REPO_ROOT=/home/intlc/projects/proxmox
```

Optional:

```bash
DEPLOY_TARGETS_PATH=/opt/phoenix-deploy-api/deploy-targets.json
```

For the `portal-live` target, also set:

```bash
SANKOFA_PORTAL_SRC=/home/intlc/projects/Sankofa/portal
```

### 3. Gitea repo secrets

Set these in the Gitea repository that should deploy:

- `PHOENIX_DEPLOY_URL`
- `PHOENIX_DEPLOY_TOKEN`

Example:

- `PHOENIX_DEPLOY_URL=http://192.168.11.59:4001/api/deploy`
- `PHOENIX_DEPLOY_TOKEN=<same value as PHOENIX_DEPLOY_SECRET>`

For webhook signing, the bootstrap/helper path also expects:

- `PHOENIX_DEPLOY_SECRET`
- `PHOENIX_WEBHOOK_DEPLOY_ENABLED=1` only if you want webhook events themselves to execute deploys

Do not enable both repo Actions deploys and webhook deploys for the same repo unless you intentionally want duplicate deploy attempts.

## Adding more repos or VM targets

Extend [deploy-targets.json](/home/intlc/projects/proxmox/phoenix-deploy-api/deploy-targets.json) with another entry.

Each target is keyed by:

- `repo`
- `branch`
- `target`

Each target defines:

- `cwd`
- `command`
- `required_env`
- optional `healthcheck`
- optional `timeout_sec`

Example shape:

```json
{
  "repo": "d-bis/another-service",
  "branch": "main",
  "target": "portal-live",
  "cwd": "${PHOENIX_REPO_ROOT}",
  "command": ["bash", "scripts/deployment/sync-sankofa-portal-7801.sh"],
  "required_env": ["PHOENIX_REPO_ROOT"]
}
```

Use separate `target` names when the same repo can publish to different VMIDs or environments.

Target-map validation is already part of:

```bash
bash scripts/verify/run-all-validation.sh --skip-genesis
```

and can also be run directly:

```bash
bash scripts/validation/validate-phoenix-deploy-targets.sh
```

## Manual testing

Before trusting a new Gitea workflow, trigger the deploy service directly:

```bash
bash scripts/dev-vm/trigger-phoenix-deploy.sh
```

Trigger the live portal deployment target directly:

```bash
bash scripts/dev-vm/trigger-phoenix-deploy.sh d-bis/proxmox main portal-live
```

Inspect configured targets:

```bash
curl -s http://192.168.11.59:4001/api/deploy-targets | jq .
```

## Recommended next expansions

- Add a Phoenix API target for the repo that owns VMID **7800** or **8600**, depending on which deployment line is canonical.
- Add repo-specific workflows once the Sankofa source repos themselves are mirrored into Gitea Actions.
- Move secret values from ad hoc `.env` files into the final operator-managed secret source once you settle the production host for `phoenix-deploy-api`.

## Notes

- The Gitea workflow is gated by `scripts/verify/run-all-validation.sh --skip-genesis` before deploy.
- `phoenix-deploy-api` now returns `404` when no matching target exists and `500` when the deploy command fails.
- Commit status updates are written back to Gitea from the deploy service itself.
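To make the request contract in the removed document above concrete, here is a sketch of the manual trigger call the workflows and helper scripts send to `phoenix-deploy-api`; the variable names mirror the repo secrets described there, and all values are placeholders, not live endpoints or tokens.

```bash
# Sketch of the deploy trigger that the Gitea Actions step issues.
# PHOENIX_DEPLOY_URL / PHOENIX_DEPLOY_TOKEN correspond to the repo secrets above.
PHOENIX_DEPLOY_URL="http://192.168.11.59:4001/api/deploy"
PHOENIX_DEPLOY_TOKEN="<same value as PHOENIX_DEPLOY_SECRET>"

curl -sSf -X POST "$PHOENIX_DEPLOY_URL" \
  -H "Authorization: Bearer $PHOENIX_DEPLOY_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"repo":"d-bis/proxmox","sha":"<commit sha>","branch":"main","target":"portal-live"}'
```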
@@ -10,9 +10,26 @@ The full plan is only partially deployable today.

- Chain `138` canonical non-gas DODO PMM mesh: `script-backed` and live
- Chain `138` pilot `Uniswap v3`, `Balancer`, `Curve`, and `1Inch` venues: `script-backed` and live
- Chain `138` native `Uniswap v2` and `SushiSwap`: `script-backed`, deployed, seeded, and verified
- Chain `138` deployed smart-contract publication now has a repo-backed orchestration lane:
  - targeted Blockscout submission wrappers for native `Uniswap v2` / `SushiSwap`
  - targeted Blockscout submission wrappers for the route execution stack and pilot venues
  - a generated publication report at [CHAIN138_DEPLOYED_SMART_CONTRACT_VERIFICATION_STATUS.md](/home/intlc/projects/proxmox/docs/04-configuration/CHAIN138_DEPLOYED_SMART_CONTRACT_VERIFICATION_STATUS.md:1)
  - current live publication status is explicit rather than implied:
    - `D3Oracle`, `D3Vault`, `DODOApprove`, and `DODOApproveProxy` are Blockscout-verified
    - `D3MMFactory` and `D3Proxy` still show bytecode-only metadata
    - the flash trio, native `Uniswap v2` / `SushiSwap`, and the route execution stack now have Blockscout verification submissions accepted, but the explorer API still exposes them as bytecode-only as of the latest report
    - repeated internal Blockscout polling after submission did not materialize source metadata yet, so the remaining work is now explorer-side verification materialization or manual explorer intervention rather than missing repo automation
- Chain `138` supported spot / routing protocol publication is now live end to end:
  - token-aggregation planner capabilities expose `DODO`, `Uniswap v3`, `Uniswap v2`, `SushiSwap`, `Balancer`, `Curve`, and `1Inch`
  - MEV venue coverage exposes native `curve`, `dodo_d3mm`, `dodo_pmm`, `sushiswap`, `uniswap_v2`, and `uniswap_v3`
  - token-aggregation persistence for Chain `138` V2/Sushi pools is wired to the DBIS primary and writing into `liquidity_pools`
- Chain `138` `Aave`: repo-backed deployment surface plus imported upstream native source now exist, but rollout remains blocked on real Chain `138` market deployment and canonical live addresses
- Chain `138` `GMX`: imported upstream native source now exists, but rollout remains blocked on Chain `138` deployment/configuration work and canonical live addresses
- Chain `138` `dYdX`: canonical inventory surface exists, but it remains blocked on a native protocol stack and live Chain `138` addresses
- public `cW*` token and partial PMM rollout: `script-backed` in parts
- ALL Mainnet `651940` full `cA*` mesh: `inventory-backed`, not fully deployer-backed
- full protocol completion across `DODO`, `Uniswap v2`, `Uniswap v3`, `SushiSwap`, `Curve`, `Balancer`, `1Inch`, `Aave`, `GMX`, and `dYdX`: not fully deployer-backed
- full protocol completion across `DODO`, `Uniswap v2`, `Uniswap v3`, `SushiSwap`, `Curve`, `Balancer`, `1Inch`, `Aave`, `GMX`, and `dYdX`: Chain `138` supported spot/routing set is complete; `Aave` and `GMX` now have imported upstream native source, while `dYdX` still remains an external native-stack gap

## Script-backed now

@@ -22,6 +39,25 @@ The full plan is only partially deployable today.
| Chain `138` rollout wrapper | [scripts/deployment/run-all-next-steps-chain138.sh](/home/intlc/projects/proxmox/scripts/deployment/run-all-next-steps-chain138.sh:1) |
| Chain `138` readiness validation | [scripts/verify/check-gru-v2-chain138-readiness.sh](/home/intlc/projects/proxmox/scripts/verify/check-gru-v2-chain138-readiness.sh:1) |
| Chain `138` protocol venue deployer | [scripts/deployment/deploy-chain138-pilot-protocol-venues.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-chain138-pilot-protocol-venues.sh:1) |
| Chain `138` native `Uniswap v2` deployer | [scripts/deployment/deploy-chain138-uniswap-v2-native.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-chain138-uniswap-v2-native.sh:1) |
| Chain `138` native `SushiSwap` deployer | [scripts/deployment/deploy-chain138-sushiswap-native.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-chain138-sushiswap-native.sh:1) |
| Chain `138` native V2 venue verification | [scripts/verify/check-chain138-native-v2-venues.sh](/home/intlc/projects/proxmox/scripts/verify/check-chain138-native-v2-venues.sh:1) |
| Chain `138` native V2 Blockscout publication | [scripts/verify/verify-chain138-native-v2-blockscout.sh](/home/intlc/projects/proxmox/scripts/verify/verify-chain138-native-v2-blockscout.sh:1) |
| Chain `138` route execution stack Blockscout publication | [scripts/verify/verify-chain138-route-execution-stack-blockscout.sh](/home/intlc/projects/proxmox/scripts/verify/verify-chain138-route-execution-stack-blockscout.sh:1) |
| Chain `138` deployed-contract publication report | [scripts/verify/check-chain138-deployed-contract-publication.py](/home/intlc/projects/proxmox/scripts/verify/check-chain138-deployed-contract-publication.py:1) |
| Chain `138` publication orchestrator | [scripts/deployment/publish-chain138-deployed-smart-contracts.sh](/home/intlc/projects/proxmox/scripts/deployment/publish-chain138-deployed-smart-contracts.sh:1) |
| Chain `138` Aave execution stack deployer | [scripts/deployment/deploy-chain138-aave-v3-execution-stack.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-chain138-aave-v3-execution-stack.sh:1) |
| Chain `138` Aave quote-push receiver deployer | [scripts/deployment/deploy-chain138-aave-quote-push-receiver.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-chain138-aave-quote-push-receiver.sh:1) |
| Chain `138` remaining protocol env verifier | [scripts/verify/check-chain138-remaining-protocol-env.sh](/home/intlc/projects/proxmox/scripts/verify/check-chain138-remaining-protocol-env.sh:1) |
| Chain `138` Aave rollout readiness verifier | [scripts/verify/check-chain138-aave-rollout-readiness.sh](/home/intlc/projects/proxmox/scripts/verify/check-chain138-aave-rollout-readiness.sh:1) |
| Chain `138` Aave runtime publication helper | [scripts/deployment/publish-chain138-aave-runtime-from-artifacts.sh](/home/intlc/projects/proxmox/scripts/deployment/publish-chain138-aave-runtime-from-artifacts.sh:1) |
| Chain `138` Aave blocker-removal worksheet | [CHAIN138_AAVE_BLOCKER_REMOVAL_WORKSHEET.md](/home/intlc/projects/proxmox/docs/04-configuration/CHAIN138_AAVE_BLOCKER_REMOVAL_WORKSHEET.md:1) |
| Chain `138` Aave rollout manifest template | [chain138-aave-rollout-manifest.example.json](/home/intlc/projects/proxmox/config/chain138-aave-rollout-manifest.example.json:1) |
| Chain `138` Aave manifest apply helper | [scripts/deployment/apply-chain138-aave-manifest.sh](/home/intlc/projects/proxmox/scripts/deployment/apply-chain138-aave-manifest.sh:1) |
| Imported upstream native Aave source | [vendor/chain138-protocols/aave-v3-origin](</home/intlc/projects/proxmox/vendor/chain138-protocols/aave-v3-origin>) |
| Imported upstream native GMX source | [vendor/chain138-protocols/gmx-synthetics](</home/intlc/projects/proxmox/vendor/chain138-protocols/gmx-synthetics>) |
| Chain `138` native Aave V3 Origin scaffold | [deploy-chain138-aave-v3-origin-market.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-chain138-aave-v3-origin-market.sh:1) |
| Chain `138` native GMX synthetics scaffold | [deploy-chain138-gmx-synthetics-core.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-chain138-gmx-synthetics-core.sh:1) |
| Chain `138` canonical PMM pool seeding | [smom-dbis-138/scripts/deployment/seed-chain138-canonical-pmm-pools.sh](/home/intlc/projects/proxmox/smom-dbis-138/scripts/deployment/seed-chain138-canonical-pmm-pools.sh:1) |
| GRU mesh planning and live reconciliation | [scripts/verify/reconcile-gru-v2-full-mesh-status.py](/home/intlc/projects/proxmox/scripts/verify/reconcile-gru-v2-full-mesh-status.py:1) |
| ALL Mainnet `cA*` token deployment wrapper | [scripts/deployment/deploy-allmainnet-ca-tokens.sh](/home/intlc/projects/proxmox/scripts/deployment/deploy-allmainnet-ca-tokens.sh:1) |

@@ -33,6 +69,9 @@ The full plan is only partially deployable today.
|---|---|---|
| ALL Mainnet token inventory | [docs/11-references/ALL_MAINNET_TOKEN_ADDRESSES.md](/home/intlc/projects/proxmox/docs/11-references/ALL_MAINNET_TOKEN_ADDRESSES.md:1) | still needs final deployed `cA*` addresses to complete live inventory |
| `651940` planned full mesh | [config/gru-v2-full-mesh-pool-tracker.json](/home/intlc/projects/proxmox/config/gru-v2-full-mesh-pool-tracker.json:1) | still needs final live pool addresses and liquidity |
| Chain `138` remaining native protocol inventory | [config/chain138-remaining-protocol-surface.json](/home/intlc/projects/proxmox/config/chain138-remaining-protocol-surface.json:1) | `Aave` and `GMX` are now source-backed, but still need live Chain `138` deployment outputs and canonical addresses; `dYdX` still needs both source and live addresses |
| Chain `138` remaining protocol discovery evidence | [CHAIN138_REMAINING_PROTOCOL_DISCOVERY_REPORT.md](/home/intlc/projects/proxmox/docs/04-configuration/CHAIN138_REMAINING_PROTOCOL_DISCOVERY_REPORT.md:1) | evidence pass found no discoverable canonical live addresses for Aave / GMX / dYdX on Chain `138` |
| Chain `138` native protocol stack gap report | [CHAIN138_NATIVE_PROTOCOL_STACK_GAP_REPORT.md](/home/intlc/projects/proxmox/docs/04-configuration/CHAIN138_NATIVE_PROTOCOL_STACK_GAP_REPORT.md:1) | confirms the repo does not include full native Aave / GMX / dYdX deployment stacks |
| public/non-public protocol target state | [docs/04-configuration/GRU_V2_PROTOCOL_COMPLETION_MATRIX.md](/home/intlc/projects/proxmox/docs/04-configuration/GRU_V2_PROTOCOL_COMPLETION_MATRIX.md:1) | no end-to-end deployer coverage for all protocol cells |

## External blockers

@@ -42,6 +81,7 @@ The full plan is only partially deployable today.
| Missing live `651940` venue addresses and integrations for the non-DODO protocol set | canonical env surface now exists, but the live addresses still need to be supplied |
| Live liquidity and partner venue dependencies | even with scripts, final pool rows cannot be marked `live` without real seeding and venue support |
| Chain `138` gas-native runtime verifier / vault wiring | the gas family rows remain blocked until real `CW_GAS_*_CHAIN138` addresses are supplied from deployed contracts |
| Native `Aave`, `GMX`, and `dYdX` protocol programs on Chain `138` | `Aave` now has repo-backed deployment wrappers plus imported upstream source but still needs real Chain `138` market deployment outputs; `GMX` now has imported upstream source but still needs a Chain `138` deployment program and live addresses; `dYdX` still needs both a native stack and canonical live addresses |

## New operator entrypoints

@@ -51,3 +91,5 @@ The full plan is only partially deployable today.
| [scripts/verify/check-gru-v2-full-deployment-implementation.py](/home/intlc/projects/proxmox/scripts/verify/check-gru-v2-full-deployment-implementation.py:1) | verify which plan segments are actually implemented in-repo |
| [scripts/verify/check-gru-v2-core-protocol-blockers.sh](/home/intlc/projects/proxmox/scripts/verify/check-gru-v2-core-protocol-blockers.sh:1) | verify the repo-side blockers are closed and isolate only the remaining external dependencies |
| [scripts/verify/check-allmainnet-protocol-env.sh](/home/intlc/projects/proxmox/scripts/verify/check-allmainnet-protocol-env.sh:1) | inventory the remaining ALL Mainnet protocol env gaps so missing venue coverage is explicit |
| [scripts/verify/check-chain138-remaining-protocol-env.sh](/home/intlc/projects/proxmox/scripts/verify/check-chain138-remaining-protocol-env.sh:1) | inventory the remaining Chain `138` Aave / GMX / dYdX protocol env gaps and verify bytecode when addresses are supplied |
| [scripts/verify/check-chain138-native-protocol-stack-source.sh](/home/intlc/projects/proxmox/scripts/verify/check-chain138-native-protocol-stack-source.sh:1) | prove whether the repo actually contains the native source families needed to deploy Aave / GMX / dYdX on Chain `138` |
docs/04-configuration/GRU_V2_FULL_MESH_EXECUTION_CHECKLIST.md (new file, 106 lines)
@@ -0,0 +1,106 @@
|
||||
# GRU v2 Full Mesh Execution Checklist
|
||||
|
||||
This checklist turns the master matrix into execution order.
|
||||
|
||||
Status values:
|
||||
|
||||
- `todo`
|
||||
- `in_progress`
|
||||
- `blocked`
|
||||
- `done`
|
||||
|
||||
Execution classes:
|
||||
|
||||
- `script-backed`
|
||||
- `inventory-backed`
|
||||
- `external-blocked`
|
||||
|
||||
Current truth:
|
||||
|
||||
- `138` DODO PMM work is `script-backed`
|
||||
- `651940` full `cA*` deployment is currently `inventory-backed` and `external-blocked`
|
||||
- protocol-complete rollout across both namespaces is not fully deployer-backed in this repo yet
|
||||
|
||||
## 1. Chain 138 Canonical Pools
|
||||
|
||||
| Status | Namespace | Pair / Venue | Priority | Notes |
|
||||
|---|---|---|---|---|
|
||||
| `todo` | `c* V2` | `cUSDT V2 / cUSDC V2` | `P0` | canonical USD hub |
|
||||
| `todo` | `c* V2` | `cUSDT V2 / USDT` | `P0` | native bridge rail |
|
||||
| `todo` | `c* V2` | `cUSDC V2 / USDC` | `P0` | native bridge rail |
|
||||
| `todo` | `c* V2` | `cEURC V2 / cUSDC V2` | `P0` | Wave 1 |
|
||||
| `todo` | `c* V2` | `cEURT V2 / cUSDC V2` | `P0` | Wave 1 |
|
||||
| `todo` | `c* V2` | `cGBPC V2 / cUSDC V2` | `P0` | Wave 1 |
|
||||
| `todo` | `c* V2` | `cGBPT V2 / cUSDC V2` | `P0` | Wave 1 |
|
||||
| `todo` | `c* V2` | `cAUDC V2 / cUSDC V2` | `P0` | Wave 1 |
|
||||
| `todo` | `c* V2` | `cJPYC V2 / cUSDC V2` | `P0` | Wave 1 |
|
||||
| `todo` | `c* V2` | `cCHFC V2 / cUSDC V2` | `P0` | Wave 1 |
|
||||
| `todo` | `c* V2` | `cCADC V2 / cUSDC V2` | `P0` | Wave 1 |
|
||||
| `todo` | `c* V2` | `cXAUC V2 / cUSDC V2` | `P0` | commodity |
|
||||
| `todo` | `c* V2` | `cXAUT V2 / cUSDC V2` | `P0` | commodity |
|
||||
| `todo` | `c* V2` | `cEURC V2 / cEURT V2` | `P1` | cross-link |
|
||||
| `todo` | `c* V2` | `cGBPC V2 / cGBPT V2` | `P1` | cross-link |
|
||||
| `todo` | `c* V2` | `cXAUC V2 / cXAUT V2` | `P1` | cross-link |
|
||||
|
||||
## 2. ALL Mainnet Canonical Pools
|
||||
|
||||
| Status | Namespace | Pair / Venue | Priority | Notes |
|
||||
|---|---|---|---|---|
|
||||
| `todo` | `cA*` | `cAUSDT / cAUSDC` | `P0` | canonical ALL USD hub |
|
||||
| `todo` | `cA*` | `cAUSDT / AUSDT` | `P0` | native ALL rail |
|
||||
| `todo` | `cA*` | `cAUSDC / USDC` | `P0` | native ALL rail |
|
||||
| `todo` | `cA*` | `cAEURC / cAUSDC` | `P0` | Wave 1 |
|
||||
| `todo` | `cA*` | `cAEURT / cAUSDC` | `P0` | Wave 1 |
|
||||
| `todo` | `cA*` | `cAGBPC / cAUSDC` | `P0` | Wave 1 |
|
||||
| `todo` | `cA*` | `cAGBPT / cAUSDC` | `P0` | Wave 1 |
|
||||
| `todo` | `cA*` | `cAAUDC / cAUSDC` | `P0` | Wave 1 |
|
||||
| `todo` | `cA*` | `cAJPYC / cAUSDC` | `P0` | Wave 1 |
|
||||
| `todo` | `cA*` | `cACHFC / cAUSDC` | `P0` | Wave 1 |
|
||||
| `todo` | `cA*` | `cACADC / cAUSDC` | `P0` | Wave 1 |
|
||||
| `todo` | `cA*` | `cAXAUC / cAUSDC` | `P0` | commodity |
|
||||
| `todo` | `cA*` | `cAXAUT / cAUSDC` | `P0` | commodity |
|
||||
| `todo` | `cA*` | `cAEURC / cAEURT` | `P1` | cross-link |
|
||||
| `todo` | `cA*` | `cAGBPC / cAGBPT` | `P1` | cross-link |
|
||||
| `todo` | `cA*` | `cAXAUC / cAXAUT` | `P1` | cross-link |
|
||||
|
||||
## 3. Public cW Mesh
|
||||
|
||||
| Status | Chain | Required work | Priority | Notes |
|
||||
|---|---|---|---|---|
|
||||
| `todo` | `1` | full stable hub + Wave 1 + gas-native lanes | `P2` | first public reference mesh |
|
||||
| `todo` | `10` | full stable hub + Wave 1 + gas-native lanes | `P3` | ETH L2 |
|
||||
| `todo` | `8453` | full stable hub + Wave 1 + gas-native lanes | `P3` | ETH L2 |
|
||||
| `todo` | `42161` | full stable hub + Wave 1 + gas-native lanes | `P3` | ETH L2 |
|
||||
| `todo` | `137` | full stable hub + Wave 1 + gas-native lanes | `P4` | major public chain |
|
||||
| `todo` | `56` | full stable hub + Wave 1 + gas-native lanes | `P4` | major public chain |
|
||||
| `todo` | `100` | full stable hub + Wave 1 + gas-native lanes | `P4` | major public chain |
|
||||
| `todo` | `43114` | full stable hub + Wave 1 + gas-native lanes | `P4` | major public chain |
|
||||
| `todo` | `42220` | full stable hub + Wave 1 + gas-native lanes | `P4` | major public chain |
|
||||
| `todo` | `25` | full stable hub + Wave 1 + gas-native lanes | `P4` | major public chain |
|
||||
| `todo` | `1111` | publish mirrors, then deploy full mesh | `P5` | token family still incomplete |
|
||||
|
||||
## 4. Protocol Completion
|
||||
|
||||
| Status | Protocol | Namespace scope | Completion requirement |
|
||||
|---|---|---|---|
|
||||
| `todo` | `DODO` | all namespaces | primary PMM mesh complete |
|
||||
| `todo` | `Uniswap v3` | all namespaces | live reference and execution lanes |
|
||||
| `todo` | `Uniswap v2` | all namespaces | fallback spot lanes where applicable |
|
||||
| `todo` | `SushiSwap` | all namespaces | secondary AMM lanes |
|
||||
| `todo` | `Curve` | all namespaces | stable and basket lanes |
|
||||
| `todo` | `Balancer` | all namespaces | weighted and stable basket lanes |
|
||||
| `todo` | `1Inch` | all namespaces | routing/execution integration |
|
||||
| `todo` | `Aave` | all namespaces | reserve + flash-liquidity integration |
|
||||
| `todo` | `GMX` | all namespaces | market integration or unsupported-by-protocol closure |
|
||||
| `todo` | `dYdX` | all namespaces | market integration or unsupported-by-protocol closure |
|
||||
|
||||
## 5. MEV Completion

| Status | Requirement | Exit condition |
|---|---|---|
| `todo` | discovery | pools/venues visible in canonical MEV inventory |
| `todo` | quoting | exact protocol quote path works |
| `todo` | simulation | route simulation matches execution semantics |
| `todo` | execution | execution adapter succeeds |
| `todo` | settlement | settlement and attribution persist cleanly |
| `todo` | observability | health / infra / freshness / coverage surfaces green |

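`reports/status/gru_v2_full_mesh_status_report.json` (added in this change, diff suppressed below) is the natural place to gate these exit conditions, but its schema is not visible here, so the field names in this sketch are assumptions for illustration only:

```bash
# Hypothetical gate: fail unless every MEV stage in the status report is closed.
# "mev_stages" and "status" are assumed field names — adjust to the real schema.
jq -e '[.mev_stages[]? | select(.status != "done")] | length == 0' \
  reports/status/gru_v2_full_mesh_status_report.json \
  && echo "all MEV stages closed" \
  || { echo "open MEV stages remain" >&2; exit 1; }
```
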
@@ -40,7 +40,7 @@ Implementation vocabulary:

| Namespace | DODO | Uni v3 | Uni v2 | Sushi | Curve | Balancer | 1Inch | Aave | GMX | dYdX |
|---|---|---|---|---|---|---|---|---|---|---|
| `138 c* V2` | `done`, `script-backed` | `done`, `script-backed` | `todo`, `external-blocked` | `todo`, `external-blocked` | `done`, `script-backed` | `done`, `script-backed` | `done`, `script-backed` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` |
| `138 c* V2` | `done`, `script-backed` | `done`, `script-backed` | `done`, `script-backed` | `done`, `script-backed` | `done`, `script-backed` | `done`, `script-backed` | `done`, `script-backed` | `blocked`, `script-backed` | `blocked`, `inventory-backed` | `blocked`, `inventory-backed` |
| `651940 cA*` | `todo`, `inventory-backed` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` |
| public `cW*` | `in_progress`, `script-backed` | `in_progress`, `inventory-backed` | `in_progress`, `inventory-backed` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` | `in_progress`, `inventory-backed` | `todo`, `external-blocked` | `todo`, `external-blocked` | `todo`, `external-blocked` |

@@ -54,4 +54,16 @@ For a protocol cell to move to `done`, all of the following should be true:

4. `live`
5. `MEV-ready` where the protocol participates in MEV routing/execution

For `Aave`, `GMX`, and `dYdX`, if the protocol does not support the family or chain natively, the cell should be closed as `unsupported_by_protocol` rather than left ambiguous.
For `Aave`, `GMX`, and `dYdX`, close a cell as `unsupported_by_protocol` only when the namespace is intentionally out of scope. If the protocol remains a target but lacks live contracts or addresses, keep it `blocked`.

## 4. Chain 138 closure evidence

These `138 c* V2` cells are closed with explicit evidence rather than left as open `todo` rows:

| Protocol | Closed status | Evidence |
|---|---|---|
| `Uniswap v2` | `done`, `script-backed` | native Chain `138` factory `0x0C30F6e67Ab3667fCc2f5CEA8e274ef1FB920279`, router `0x3019A7fDc76ba7F64F18d78e66842760037ee638`, and seeded pairs (`WETH/USDT`, `WETH/USDC`, `cUSDT/cUSDC`) are now published in `config/smart-contracts-master.json`, `.env.master.example`, and verified by [check-chain138-native-v2-venues.sh](/home/intlc/projects/proxmox/scripts/verify/check-chain138-native-v2-venues.sh:1) |
| `SushiSwap` | `done`, `script-backed` | native Chain `138` factory `0x2871207ff0d56089D70c0134d33f1291B6Fce0BE`, router `0xB37b93D38559f53b62ab020A14919f2630a1aE34`, and seeded pairs (`WETH/USDT`, `WETH/USDC`, `cUSDT/cUSDC`) are now published in `config/smart-contracts-master.json`, `.env.master.example`, and verified by [check-chain138-native-v2-venues.sh](/home/intlc/projects/proxmox/scripts/verify/check-chain138-native-v2-venues.sh:1) |
| `Aave` | `blocked`, `source-backed`, `external-blocked` | the repo now contains a Chain `138` native surface inventory, a hard env/bytecode checker, a Chain `138` wrapper for the Aave-backed MEV execution stack, a Chain `138` quote-push receiver deployer, and imported upstream source from `aave-dao/aave-v3-origin`, but no canonical Chain `138` Aave pool/provider/data-provider addresses or native market rollout are published yet |
| `GMX` | `blocked`, `source-backed`, `external-blocked` | a canonical Chain `138` inventory surface now exists and the official upstream source from `gmx-io/gmx-synthetics` is now imported, but no Chain `138` GMX deployment outputs, live addresses, registry wiring, planner capabilities, or MEV venue coverage are published yet |
| `dYdX` | `blocked`, `inventory-backed`, `external-blocked` | a canonical Chain `138` inventory surface now exists, but no Chain `138` dYdX market / data-provider / exchange addresses or vendored native deployment stack exist in canonical env, registry, planner capabilities, or MEV venue coverage |

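Because the `Uniswap v2` and `SushiSwap` rows publish concrete factory and router addresses, the cheapest independent spot check is confirming those addresses carry bytecode on Chain `138`. A minimal sketch; `RPC_URL_138` matches the env name used by the bridge-parity script later in this change, everything else is illustrative:

```bash
#!/usr/bin/env bash
# Spot-check that the published Chain 138 native venue contracts have bytecode.
set -euo pipefail
: "${RPC_URL_138:?set RPC_URL_138 to a Chain 138 RPC endpoint}"

declare -A venues=(
  [uniswap-v2-factory]=0x0C30F6e67Ab3667fCc2f5CEA8e274ef1FB920279
  [uniswap-v2-router]=0x3019A7fDc76ba7F64F18d78e66842760037ee638
  [sushiswap-factory]=0x2871207ff0d56089D70c0134d33f1291B6Fce0BE
  [sushiswap-router]=0xB37b93D38559f53b62ab020A14919f2630a1aE34
)

for name in "${!venues[@]}"; do
  code="$(cast code "${venues[$name]}" --rpc-url "$RPC_URL_138")"
  if [[ "$code" != "0x" ]]; then
    echo "ok     $name ${venues[$name]}"
  else
    echo "EMPTY  $name ${venues[$name]}"
  fi
done
```
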
@@ -1,247 +0,0 @@
|
||||
{
|
||||
"defaults": {
|
||||
"timeout_sec": 1800
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "main",
|
||||
"target": "default",
|
||||
"description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"phoenix-deploy-api/scripts/install-systemd.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "http://192.168.11.59:4001/health",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "phoenix-deploy-api",
|
||||
"attempts": 8,
|
||||
"delay_ms": 3000,
|
||||
"timeout_ms": 10000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "main",
|
||||
"target": "cloudflare-sync",
|
||||
"description": "Optional: sync Cloudflare DNS from repo .env (path-gated; set PHOENIX_CLOUDFLARE_SYNC=1 on host).",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/gitea-cloudflare-sync.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
],
|
||||
"timeout_sec": 600
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "main",
|
||||
"target": "cloudflare-sync-force",
|
||||
"description": "Same as cloudflare-sync but skips path filter (operator / manual).",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/gitea-cloudflare-sync.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
],
|
||||
"timeout_sec": 600
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "main",
|
||||
"target": "portal-live",
|
||||
"description": "Deploy the Sankofa portal to CT 7801 on Proxmox.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/sync-sankofa-portal-7801.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT",
|
||||
"SANKOFA_PORTAL_SRC"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "http://192.168.11.51:3000/",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "<html",
|
||||
"attempts": 10,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 10000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/CurrenciCombo",
|
||||
"branch": "main",
|
||||
"target": "default",
|
||||
"description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT",
|
||||
"PHOENIX_DEPLOY_WORKSPACE"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "https://curucombo.xn--vov0g.com/api/ready",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "\"ready\":true",
|
||||
"attempts": 12,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "main",
|
||||
"target": "atomic-swap-dapp-live",
|
||||
"description": "Deploy the Atomic Swap dApp to VMID 5801 on Proxmox.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/deploy-atomic-swap-dapp-5801.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "https://atomic-swap.defi-oracle.io/data/live-route-registry.json",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "\"liveBridgeRoutes\"",
|
||||
"attempts": 10,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "master",
|
||||
"target": "default",
|
||||
"description": "Install the Phoenix deploy API locally on the dev VM from the synced repo workspace.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"phoenix-deploy-api/scripts/install-systemd.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "http://192.168.11.59:4001/health",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "phoenix-deploy-api",
|
||||
"attempts": 8,
|
||||
"delay_ms": 3000,
|
||||
"timeout_ms": 10000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "master",
|
||||
"target": "atomic-swap-dapp-live",
|
||||
"description": "Deploy the Atomic Swap dApp to VMID 5801 on Proxmox.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/deploy-atomic-swap-dapp-5801.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "https://atomic-swap.defi-oracle.io/data/live-route-registry.json",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "\"liveBridgeRoutes\"",
|
||||
"attempts": 10,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "master",
|
||||
"target": "cloudflare-sync",
|
||||
"description": "Optional: sync Cloudflare DNS from repo .env (path-gated; set PHOENIX_CLOUDFLARE_SYNC=1 on host).",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/gitea-cloudflare-sync.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
],
|
||||
"timeout_sec": 600
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "master",
|
||||
"target": "cloudflare-sync-force",
|
||||
"description": "Same as cloudflare-sync but skips path filter (operator / manual).",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/gitea-cloudflare-sync.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT"
|
||||
],
|
||||
"timeout_sec": 600
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/proxmox",
|
||||
"branch": "master",
|
||||
"target": "portal-live",
|
||||
"description": "Deploy the Sankofa portal to CT 7801 on Proxmox.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/sync-sankofa-portal-7801.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT",
|
||||
"SANKOFA_PORTAL_SRC"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "http://192.168.11.51:3000/",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "<html",
|
||||
"attempts": 10,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 10000
|
||||
}
|
||||
},
|
||||
{
|
||||
"repo": "d-bis/CurrenciCombo",
|
||||
"branch": "master",
|
||||
"target": "default",
|
||||
"description": "Deploy CurrenciCombo from the staged Gitea workspace into Phoenix CT 8604 and verify the public hostname end to end.",
|
||||
"cwd": "${PHOENIX_REPO_ROOT}",
|
||||
"command": [
|
||||
"bash",
|
||||
"scripts/deployment/phoenix-deploy-currencicombo-from-workspace.sh"
|
||||
],
|
||||
"required_env": [
|
||||
"PHOENIX_REPO_ROOT",
|
||||
"PHOENIX_DEPLOY_WORKSPACE"
|
||||
],
|
||||
"healthcheck": {
|
||||
"url": "https://curucombo.xn--vov0g.com/api/ready",
|
||||
"expect_status": 200,
|
||||
"expect_body_includes": "\"ready\":true",
|
||||
"attempts": 12,
|
||||
"delay_ms": 5000,
|
||||
"timeout_ms": 15000
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
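Each entry above keys a deploy on its (`repo`, `branch`, `target`) triple, so a manual trigger only succeeds when the request body matches one of these rows. A hedged example against the deploy API; the host and port come from the healthcheck URLs above, while the auth header and token variable are assumptions to adapt to the running instance:

```bash
# Hypothetical manual trigger for the portal-live target on the main branch.
curl -sS -X POST "http://192.168.11.59:4001/api/deploy" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer ${PHOENIX_DEPLOY_TOKEN:?}" \
  -d '{"repo":"d-bis/proxmox","branch":"main","target":"portal-live","sha":"HEAD"}'
```

With `sha` set to `HEAD` the API reuses the already-staged local workspace instead of pulling a fresh archive, which keeps smoke tests cheap.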
@@ -25,70 +25,7 @@ if [[ -f "$REPO_ROOT/config/public-sector-program-manifest.json" ]]; then
|
||||
else
|
||||
echo "WARN: $REPO_ROOT/config/public-sector-program-manifest.json missing — set PUBLIC_SECTOR_MANIFEST_PATH in .env"
|
||||
fi
|
||||
if [[ -f "$TARGET/.env" ]]; then
|
||||
echo "Preserving existing $TARGET/.env"
|
||||
elif [[ -f "$APP_DIR/.env" ]]; then
|
||||
cp "$APP_DIR/.env" "$TARGET/.env"
|
||||
elif [[ -f "$APP_DIR/.env.example" ]]; then
|
||||
cp "$APP_DIR/.env.example" "$TARGET/.env"
|
||||
fi
|
||||
|
||||
ensure_env_value() {
|
||||
local key="$1"
|
||||
local value="$2"
|
||||
local file="$TARGET/.env"
|
||||
[[ -n "$value" && -f "$file" ]] || return 0
|
||||
|
||||
local current=""
|
||||
if grep -qE "^${key}=" "$file"; then
|
||||
current="$(grep -E "^${key}=" "$file" | tail -n 1 | cut -d= -f2-)"
|
||||
fi
|
||||
[[ -z "$current" ]] || return 0
|
||||
|
||||
local tmp
|
||||
tmp="$(mktemp)"
|
||||
awk -v key="$key" -v value="$value" '
|
||||
BEGIN { found = 0 }
|
||||
$0 ~ "^" key "=" {
|
||||
print key "=" value
|
||||
found = 1
|
||||
next
|
||||
}
|
||||
{ print }
|
||||
END {
|
||||
if (!found) print key "=" value
|
||||
}
|
||||
' "$file" > "$tmp"
|
||||
cat "$tmp" > "$file"
|
||||
rm -f "$tmp"
|
||||
}
|
||||
|
||||
repo_env_value() {
|
||||
local key="$1"
|
||||
local file="$REPO_ROOT/.env"
|
||||
[[ -f "$file" ]] || return 0
|
||||
grep -E "^${key}=" "$file" | tail -n 1 | cut -d= -f2-
|
||||
}
|
||||
|
||||
if [[ -f "$TARGET/.env" ]]; then
|
||||
ensure_env_value PHOENIX_REPO_ROOT "$REPO_ROOT"
|
||||
for key in \
|
||||
GITEA_TOKEN \
|
||||
PHOENIX_DEPLOY_SECRET \
|
||||
PROXMOX_HOST \
|
||||
PROXMOX_PORT \
|
||||
PROXMOX_USER \
|
||||
PROXMOX_TOKEN_NAME \
|
||||
PROXMOX_TOKEN_VALUE \
|
||||
PROXMOX_TLS_VERIFY \
|
||||
PUBLIC_IP \
|
||||
CLOUDFLARE_API_TOKEN \
|
||||
CLOUDFLARE_GITEA_SYNC_ZONE \
|
||||
PHOENIX_CLOUDFLARE_SYNC
|
||||
do
|
||||
ensure_env_value "$key" "$(repo_env_value "$key")"
|
||||
done
|
||||
fi
|
||||
[ -f "$APP_DIR/.env" ] && cp "$APP_DIR/.env" "$TARGET/.env" || [ -f "$APP_DIR/.env.example" ] && cp "$APP_DIR/.env.example" "$TARGET/.env" || true
|
||||
chown -R root:root "$TARGET"
|
||||
cd "$TARGET" && npm install --omit=dev
|
||||
cp "$APP_DIR/phoenix-deploy-api.service" /etc/systemd/system/
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Phoenix Deploy API — Gitea webhook receiver, deploy execution API, and Phoenix API Railing (Infra/VE)
|
||||
* Phoenix Deploy API — Gitea webhook receiver, deploy stub, and Phoenix API Railing (Infra/VE)
|
||||
*
|
||||
* Endpoints:
|
||||
* POST /webhook/gitea — Receives Gitea push/tag/PR webhooks
|
||||
@@ -19,9 +19,7 @@
|
||||
import crypto from 'crypto';
|
||||
import https from 'https';
|
||||
import path from 'path';
|
||||
import { promisify } from 'util';
|
||||
import { execFile as execFileCallback } from 'child_process';
|
||||
import { cpSync, existsSync, mkdirSync, mkdtempSync, readFileSync, readdirSync, rmSync, writeFileSync } from 'fs';
|
||||
import { readFileSync, existsSync } from 'fs';
|
||||
import { fileURLToPath } from 'url';
|
||||
import express from 'express';
|
||||
|
||||
@@ -31,13 +29,6 @@ const PORT = parseInt(process.env.PORT || '4001', 10);
|
||||
const GITEA_URL = (process.env.GITEA_URL || 'https://gitea.d-bis.org').replace(/\/$/, '');
|
||||
const GITEA_TOKEN = process.env.GITEA_TOKEN || '';
|
||||
const WEBHOOK_SECRET = process.env.PHOENIX_DEPLOY_SECRET || '';
|
||||
const PHOENIX_REPO_ROOT_DEFAULT = (process.env.PHOENIX_REPO_ROOT_DEFAULT || '/srv/projects/proxmox').trim();
|
||||
const ATOMIC_SWAP_REPO = (process.env.PHOENIX_ATOMIC_SWAP_REPO || 'd-bis/atomic-swap-dapp').trim();
|
||||
const ATOMIC_SWAP_REF = (process.env.PHOENIX_ATOMIC_SWAP_REF || 'main').trim();
|
||||
const CROSS_CHAIN_PMM_LPS_REPO = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REPO || '').trim();
|
||||
const CROSS_CHAIN_PMM_LPS_REF = (process.env.PHOENIX_CROSS_CHAIN_PMM_LPS_REF || 'main').trim();
|
||||
const SMOM_DBIS_138_REPO = (process.env.PHOENIX_SMOM_DBIS_138_REPO || '').trim();
|
||||
const SMOM_DBIS_138_REF = (process.env.PHOENIX_SMOM_DBIS_138_REF || 'main').trim();
|
||||
|
||||
const PROXMOX_HOST = process.env.PROXMOX_HOST || '';
|
||||
const PROXMOX_PORT = parseInt(process.env.PROXMOX_PORT || '8006', 10);
|
||||
@@ -51,17 +42,6 @@ const PROMETHEUS_URL = (process.env.PROMETHEUS_URL || 'http://localhost:9090').r
|
||||
const PHOENIX_WEBHOOK_URL = process.env.PHOENIX_WEBHOOK_URL || '';
|
||||
const PHOENIX_WEBHOOK_SECRET = process.env.PHOENIX_WEBHOOK_SECRET || '';
|
||||
const PARTNER_KEYS = (process.env.PHOENIX_PARTNER_KEYS || '').split(',').map((k) => k.trim()).filter(Boolean);
|
||||
const WEBHOOK_DEPLOY_ENABLED = process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === '1' || process.env.PHOENIX_WEBHOOK_DEPLOY_ENABLED === 'true';
|
||||
const execFile = promisify(execFileCallback);
|
||||
|
||||
function expandEnvTokens(value, env = process.env) {
|
||||
if (typeof value !== 'string') return value;
|
||||
return value.replace(/\$\{([A-Z0-9_]+)\}/gi, (_, key) => env[key] || '');
|
||||
}
|
||||
|
||||
function resolvePhoenixRepoRoot() {
|
||||
return (process.env.PHOENIX_REPO_ROOT || PHOENIX_REPO_ROOT_DEFAULT || '').trim().replace(/\/$/, '');
|
||||
}
|
||||
|
||||
/**
|
||||
* Manifest resolution order:
|
||||
@@ -83,395 +63,15 @@ function resolvePublicSectorManifestPath() {
|
||||
return path.join(__dirname, '..', 'config', 'public-sector-program-manifest.json');
|
||||
}
|
||||
|
||||
function resolveDeployTargetsPath() {
|
||||
const override = (process.env.DEPLOY_TARGETS_PATH || '').trim();
|
||||
if (override && existsSync(override)) return override;
|
||||
const bundled = path.join(__dirname, 'deploy-targets.json');
|
||||
if (existsSync(bundled)) return bundled;
|
||||
return bundled;
|
||||
}
|
||||
|
||||
function loadDeployTargetsConfig() {
|
||||
const configPath = resolveDeployTargetsPath();
|
||||
if (!existsSync(configPath)) {
|
||||
return {
|
||||
path: configPath,
|
||||
defaults: {},
|
||||
targets: [],
|
||||
};
|
||||
}
|
||||
const raw = readFileSync(configPath, 'utf8');
|
||||
const parsed = JSON.parse(raw);
|
||||
return {
|
||||
path: configPath,
|
||||
defaults: parsed.defaults || {},
|
||||
targets: Array.isArray(parsed.targets) ? parsed.targets : [],
|
||||
};
|
||||
}
|
||||
|
||||
function findDeployTarget(repo, branch, requestedTarget) {
|
||||
const config = loadDeployTargetsConfig();
|
||||
const wantedTarget = requestedTarget || 'default';
|
||||
const match = config.targets.find((entry) => {
|
||||
if (entry.repo !== repo) return false;
|
||||
if ((entry.branch || 'main') !== branch) return false;
|
||||
return (entry.target || 'default') === wantedTarget;
|
||||
});
|
||||
return { config, match, wantedTarget };
|
||||
}
|
||||
|
||||
async function sleep(ms) {
|
||||
await new Promise((resolve) => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
async function verifyHealthCheck(healthcheck) {
|
||||
if (!healthcheck || !healthcheck.url) return null;
|
||||
|
||||
const attempts = Math.max(1, Number(healthcheck.attempts || 1));
|
||||
const delayMs = Math.max(0, Number(healthcheck.delay_ms || 0));
|
||||
const timeoutMs = Math.max(1000, Number(healthcheck.timeout_ms || 10000));
|
||||
const expectedStatus = Number(healthcheck.expect_status || 200);
|
||||
const expectBodyIncludes = healthcheck.expect_body_includes || '';
|
||||
|
||||
let lastError = null;
|
||||
for (let attempt = 1; attempt <= attempts; attempt += 1) {
|
||||
try {
|
||||
const controller = new AbortController();
|
||||
const timeout = setTimeout(() => controller.abort(), timeoutMs);
|
||||
const res = await fetch(healthcheck.url, { signal: controller.signal });
|
||||
const body = await res.text();
|
||||
clearTimeout(timeout);
|
||||
|
||||
if (res.status !== expectedStatus) {
|
||||
throw new Error(`Expected HTTP ${expectedStatus}, got ${res.status}`);
|
||||
}
|
||||
if (expectBodyIncludes && !body.includes(expectBodyIncludes)) {
|
||||
throw new Error(`Health body missing expected text: ${expectBodyIncludes}`);
|
||||
}
|
||||
|
||||
return {
|
||||
ok: true,
|
||||
url: healthcheck.url,
|
||||
status: res.status,
|
||||
attempt,
|
||||
};
|
||||
} catch (err) {
|
||||
lastError = err;
|
||||
if (attempt < attempts && delayMs > 0) {
|
||||
await sleep(delayMs);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(`Health check failed for ${healthcheck.url}: ${lastError?.message || 'unknown error'}`);
|
||||
}
|
||||
|
||||
async function downloadRepoArchive({ owner, repo, ref, archivePath, authToken }) {
|
||||
const archiveRef = `${ref}.tar.gz`;
|
||||
const url = `${GITEA_URL}/api/v1/repos/${owner}/${repo}/archive/${archiveRef}`;
|
||||
const headers = {};
|
||||
if (authToken) headers.Authorization = `token ${authToken}`;
|
||||
const res = await fetch(url, { headers });
|
||||
if (!res.ok) {
|
||||
throw new Error(`Failed to download archive ${owner}/${repo}@${ref}: HTTP ${res.status}`);
|
||||
}
|
||||
const buffer = Buffer.from(await res.arrayBuffer());
|
||||
writeFileSync(archivePath, buffer);
|
||||
}
|
||||
|
||||
function syncExtractedTree({ sourceRoot, destRoot, entries = null }) {
|
||||
mkdirSync(destRoot, { recursive: true });
|
||||
const selectedEntries = Array.isArray(entries) ? entries : readdirSync(sourceRoot);
|
||||
for (const entry of selectedEntries) {
|
||||
const sourcePath = path.join(sourceRoot, entry);
|
||||
if (!existsSync(sourcePath)) continue;
|
||||
const destPath = path.join(destRoot, entry);
|
||||
rmSync(destPath, { recursive: true, force: true });
|
||||
cpSync(sourcePath, destPath, { recursive: true });
|
||||
}
|
||||
}
|
||||
|
||||
async function syncRepoArchive({ owner, repo, ref, destRoot, entries = null, authToken = '' }) {
|
||||
const tempDir = mkdtempSync('/tmp/phoenix-archive-');
|
||||
const archivePath = path.join(tempDir, 'repo.tar.gz');
|
||||
const extractDir = path.join(tempDir, 'extract');
|
||||
mkdirSync(extractDir, { recursive: true });
|
||||
try {
|
||||
await downloadRepoArchive({ owner, repo, ref, archivePath, authToken });
|
||||
await execFile('tar', ['-xzf', archivePath, '-C', extractDir]);
|
||||
const [rootDir] = readdirSync(extractDir);
|
||||
if (!rootDir) {
|
||||
throw new Error(`Archive for ${owner}/${repo}@${ref} was empty`);
|
||||
}
|
||||
syncExtractedTree({
|
||||
sourceRoot: path.join(extractDir, rootDir),
|
||||
destRoot,
|
||||
entries,
|
||||
});
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
async function prepareDeployWorkspace({ repo, branch, sha, target }) {
|
||||
const repoRoot = resolvePhoenixRepoRoot();
|
||||
if (!repoRoot) {
|
||||
throw new Error('PHOENIX_REPO_ROOT is not configured');
|
||||
}
|
||||
|
||||
const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
|
||||
const externalWorkspaceRoot = path.join(repoRoot, '.phoenix-deploy-workspaces', owner, repoName);
|
||||
|
||||
// Manual smoke tests can target the already-staged local workspace without
|
||||
// forcing an archive sync from Gitea.
|
||||
if (sha === 'HEAD' || sha === 'local') {
|
||||
mkdirSync(repoRoot, { recursive: true });
|
||||
if (repo !== 'd-bis/proxmox') {
|
||||
mkdirSync(externalWorkspaceRoot, { recursive: true });
|
||||
}
|
||||
return {
|
||||
PHOENIX_REPO_ROOT: repoRoot,
|
||||
PROXMOX_REPO_ROOT: repoRoot,
|
||||
PHOENIX_DEPLOY_WORKSPACE: repo === 'd-bis/proxmox' ? repoRoot : externalWorkspaceRoot,
|
||||
};
|
||||
}
|
||||
|
||||
const ref = sha || branch || 'main';
|
||||
|
||||
if (repo === 'd-bis/proxmox') {
|
||||
await syncRepoArchive({
|
||||
owner,
|
||||
repo: repoName,
|
||||
ref,
|
||||
destRoot: repoRoot,
|
||||
entries: ['config', 'phoenix-deploy-api', 'reports', 'scripts', 'token-lists'],
|
||||
authToken: GITEA_TOKEN,
|
||||
});
|
||||
} else {
|
||||
await syncRepoArchive({
|
||||
owner,
|
||||
repo: repoName,
|
||||
ref,
|
||||
destRoot: externalWorkspaceRoot,
|
||||
authToken: GITEA_TOKEN,
|
||||
});
|
||||
}
|
||||
|
||||
if (repo === 'd-bis/proxmox' && target === 'atomic-swap-dapp-live') {
|
||||
const [swapOwner, swapRepo] = ATOMIC_SWAP_REPO.includes('/')
|
||||
? ATOMIC_SWAP_REPO.split('/')
|
||||
: ['d-bis', ATOMIC_SWAP_REPO];
|
||||
await syncRepoArchive({
|
||||
owner: swapOwner,
|
||||
repo: swapRepo,
|
||||
ref: ATOMIC_SWAP_REF,
|
||||
destRoot: path.join(repoRoot, 'atomic-swap-dapp'),
|
||||
authToken: GITEA_TOKEN,
|
||||
});
|
||||
|
||||
if (CROSS_CHAIN_PMM_LPS_REPO) {
|
||||
const [lpsOwner, lpsRepo] = CROSS_CHAIN_PMM_LPS_REPO.includes('/')
|
||||
? CROSS_CHAIN_PMM_LPS_REPO.split('/')
|
||||
: ['d-bis', CROSS_CHAIN_PMM_LPS_REPO];
|
||||
await syncRepoArchive({
|
||||
owner: lpsOwner,
|
||||
repo: lpsRepo,
|
||||
ref: CROSS_CHAIN_PMM_LPS_REF,
|
||||
destRoot: path.join(repoRoot, 'cross-chain-pmm-lps'),
|
||||
authToken: GITEA_TOKEN,
|
||||
});
|
||||
}
|
||||
|
||||
if (SMOM_DBIS_138_REPO) {
|
||||
const [smomOwner, smomRepo] = SMOM_DBIS_138_REPO.includes('/')
|
||||
? SMOM_DBIS_138_REPO.split('/')
|
||||
: ['d-bis', SMOM_DBIS_138_REPO];
|
||||
await syncRepoArchive({
|
||||
owner: smomOwner,
|
||||
repo: smomRepo,
|
||||
ref: SMOM_DBIS_138_REF,
|
||||
destRoot: path.join(repoRoot, 'smom-dbis-138'),
|
||||
authToken: GITEA_TOKEN,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
PHOENIX_REPO_ROOT: repoRoot,
|
||||
PROXMOX_REPO_ROOT: repoRoot,
|
||||
PHOENIX_DEPLOY_WORKSPACE: repo === 'd-bis/proxmox' ? repoRoot : externalWorkspaceRoot,
|
||||
};
|
||||
}
|
||||
|
||||
async function runDeployTarget(definition, configDefaults, context, envOverrides = {}) {
|
||||
if (!Array.isArray(definition.command) || definition.command.length === 0) {
|
||||
throw new Error('Deploy target is missing a command array');
|
||||
}
|
||||
|
||||
const childEnv = {
|
||||
...process.env,
|
||||
...envOverrides,
|
||||
PHOENIX_DEPLOY_REPO: context.repo,
|
||||
PHOENIX_DEPLOY_BRANCH: context.branch,
|
||||
PHOENIX_DEPLOY_SHA: context.sha || '',
|
||||
PHOENIX_DEPLOY_TARGET: context.target,
|
||||
PHOENIX_DEPLOY_TRIGGER: context.trigger,
|
||||
};
|
||||
|
||||
const cwd = expandEnvTokens(definition.cwd || configDefaults.cwd || process.cwd(), childEnv);
|
||||
const timeoutSeconds = Number(definition.timeout_sec || configDefaults.timeout_sec || 1800);
|
||||
const timeout = Number.isFinite(timeoutSeconds) && timeoutSeconds > 0 ? timeoutSeconds * 1000 : 1800 * 1000;
|
||||
const command = definition.command.map((part) => expandEnvTokens(part, childEnv));
|
||||
const missingEnv = (definition.required_env || []).filter((key) => !childEnv[key]);
|
||||
if (missingEnv.length > 0) {
|
||||
throw new Error(`Missing required env for deploy target: ${missingEnv.join(', ')}`);
|
||||
}
|
||||
if (!existsSync(cwd)) {
|
||||
throw new Error(`Deploy working directory does not exist: ${cwd}`);
|
||||
}
|
||||
|
||||
const { stdout, stderr } = await execFile(command[0], command.slice(1), {
|
||||
cwd,
|
||||
env: childEnv,
|
||||
timeout,
|
||||
maxBuffer: 10 * 1024 * 1024,
|
||||
});
|
||||
|
||||
const healthcheck = await verifyHealthCheck(definition.healthcheck || configDefaults.healthcheck || null);
|
||||
|
||||
return {
|
||||
cwd,
|
||||
command,
|
||||
stdout: stdout || '',
|
||||
stderr: stderr || '',
|
||||
timeout_sec: timeoutSeconds,
|
||||
healthcheck,
|
||||
};
|
||||
}
|
||||
|
||||
async function executeDeploy({ repo, branch = 'main', target = 'default', sha = '', trigger = 'api' }) {
|
||||
if (!repo) {
|
||||
const error = new Error('repo required');
|
||||
error.statusCode = 400;
|
||||
error.payload = { error: error.message };
|
||||
throw error;
|
||||
}
|
||||
|
||||
const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
|
||||
const commitSha = sha || '';
|
||||
const requestedTarget = target || 'default';
|
||||
const { config, match, wantedTarget } = findDeployTarget(repo, branch, requestedTarget);
|
||||
|
||||
if (!match) {
|
||||
const error = new Error('Deploy target not configured');
|
||||
error.statusCode = 404;
|
||||
error.payload = {
|
||||
error: error.message,
|
||||
repo,
|
||||
branch,
|
||||
target: wantedTarget,
|
||||
config_path: config.path,
|
||||
};
|
||||
if (commitSha && GITEA_TOKEN) {
|
||||
await setGiteaCommitStatus(owner, repoName, commitSha, 'failure', `No deploy target for ${repo} ${branch} ${wantedTarget}`);
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
|
||||
if (commitSha && GITEA_TOKEN) {
|
||||
await setGiteaCommitStatus(owner, repoName, commitSha, 'pending', 'Phoenix deployment in progress');
|
||||
}
|
||||
|
||||
console.log(`[deploy] ${repo} branch=${branch} target=${wantedTarget} sha=${commitSha} trigger=${trigger}`);
|
||||
|
||||
let deployResult = null;
|
||||
let deployError = null;
|
||||
let envOverrides = {};
|
||||
|
||||
try {
|
||||
envOverrides = await prepareDeployWorkspace({
|
||||
repo,
|
||||
branch,
|
||||
sha: commitSha,
|
||||
target: wantedTarget,
|
||||
});
|
||||
deployResult = await runDeployTarget(match, config.defaults, {
|
||||
repo,
|
||||
branch,
|
||||
sha: commitSha,
|
||||
target: wantedTarget,
|
||||
trigger,
|
||||
}, envOverrides);
|
||||
if (commitSha && GITEA_TOKEN) {
|
||||
await setGiteaCommitStatus(owner, repoName, commitSha, 'success', `Deployed to ${wantedTarget}`);
|
||||
}
|
||||
return {
|
||||
status: 'completed',
|
||||
repo,
|
||||
branch,
|
||||
target: wantedTarget,
|
||||
config_path: config.path,
|
||||
command: deployResult.command,
|
||||
cwd: deployResult.cwd,
|
||||
stdout: deployResult.stdout,
|
||||
stderr: deployResult.stderr,
|
||||
healthcheck: deployResult.healthcheck,
|
||||
};
|
||||
} catch (err) {
|
||||
deployError = err;
|
||||
if (commitSha && GITEA_TOKEN) {
|
||||
await setGiteaCommitStatus(owner, repoName, commitSha, 'failure', `Deploy failed: ${err.message.slice(0, 120)}`);
|
||||
}
|
||||
err.statusCode = err.statusCode || 500;
|
||||
err.payload = err.payload || {
|
||||
error: err.message,
|
||||
repo,
|
||||
branch,
|
||||
target: wantedTarget,
|
||||
config_path: config.path,
|
||||
};
|
||||
throw err;
|
||||
} finally {
|
||||
if (PHOENIX_WEBHOOK_URL) {
|
||||
const payload = {
|
||||
event: 'deploy.completed',
|
||||
repo,
|
||||
branch,
|
||||
target: wantedTarget,
|
||||
sha: commitSha,
|
||||
success: Boolean(deployResult),
|
||||
command: deployResult?.command,
|
||||
cwd: deployResult?.cwd,
|
||||
phoenix_repo_root: envOverrides.PHOENIX_REPO_ROOT || null,
|
||||
error: deployError?.message || null,
|
||||
};
|
||||
const body = JSON.stringify(payload);
|
||||
const sig = crypto.createHmac('sha256', PHOENIX_WEBHOOK_SECRET || '').update(body).digest('hex');
|
||||
fetch(PHOENIX_WEBHOOK_URL, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', 'X-Phoenix-Signature': `sha256=${sig}` },
|
||||
body,
|
||||
}).catch((e) => console.error('[webhook] outbound failed', e.message));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const httpsAgent = new https.Agent({ rejectUnauthorized: process.env.PROXMOX_TLS_VERIFY !== '0' });
|
||||
|
||||
function formatProxmoxAuthHeader(user, tokenName, tokenValue) {
|
||||
if (tokenName.includes('!')) {
|
||||
return `PVEAPIToken=${tokenName}=${tokenValue}`;
|
||||
}
|
||||
return `PVEAPIToken=${user}!${tokenName}=${tokenValue}`;
|
||||
}
|
||||
|
||||
async function proxmoxRequest(endpoint, method = 'GET', body = null) {
|
||||
const baseUrl = `https://${PROXMOX_HOST}:${PROXMOX_PORT}/api2/json`;
|
||||
const url = `${baseUrl}${endpoint}`;
|
||||
const options = {
|
||||
method,
|
||||
headers: {
|
||||
Authorization: formatProxmoxAuthHeader(PROXMOX_USER, PROXMOX_TOKEN_NAME, PROXMOX_TOKEN_VALUE),
|
||||
Authorization: `PVEAPIToken=${PROXMOX_USER}!${PROXMOX_TOKEN_NAME}=${PROXMOX_TOKEN_VALUE}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
agent: httpsAgent,
|
||||
@@ -562,44 +162,12 @@ app.post('/webhook/gitea', async (req, res) => {
|
||||
|
||||
if (action === 'push' || (action === 'synchronize' && payload.pull_request)) {
|
||||
if (branch === 'main' || branch === 'master' || ref.startsWith('refs/tags/')) {
|
||||
if (!WEBHOOK_DEPLOY_ENABLED) {
|
||||
return res.status(200).json({
|
||||
received: true,
|
||||
repo: fullName,
|
||||
branch,
|
||||
sha,
|
||||
deployed: false,
|
||||
message: 'Webhook accepted; set PHOENIX_WEBHOOK_DEPLOY_ENABLED=1 to execute deploys from webhook events.',
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await executeDeploy({
|
||||
repo: fullName,
|
||||
branch,
|
||||
sha,
|
||||
target: 'default',
|
||||
trigger: 'webhook',
|
||||
});
|
||||
return res.status(200).json({
|
||||
received: true,
|
||||
repo: fullName,
|
||||
branch,
|
||||
sha,
|
||||
deployed: true,
|
||||
result,
|
||||
});
|
||||
} catch (err) {
|
||||
return res.status(200).json({
|
||||
received: true,
|
||||
repo: fullName,
|
||||
branch,
|
||||
sha,
|
||||
deployed: false,
|
||||
error: err.message,
|
||||
details: err.payload || null,
|
||||
});
|
||||
if (sha && GITEA_TOKEN) {
|
||||
await setGiteaCommitStatus(owner, repoName, sha, 'pending', 'Phoenix deployment triggered');
|
||||
}
|
||||
// Stub: enqueue deploy; actual implementation would call Proxmox/deploy logic
|
||||
console.log(`[deploy-stub] Would deploy ${fullName} branch=${branch} sha=${sha}`);
|
||||
// Stub: when full deploy runs, call setGiteaCommitStatus(owner, repoName, sha, 'success'|'failure', ...)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -617,36 +185,47 @@ app.post('/api/deploy', async (req, res) => {
|
||||
}
|
||||
|
||||
const { repo, branch = 'main', target, sha } = req.body;
|
||||
try {
|
||||
const result = await executeDeploy({
|
||||
repo,
|
||||
branch,
|
||||
sha,
|
||||
target,
|
||||
trigger: 'api',
|
||||
});
|
||||
res.status(200).json(result);
|
||||
} catch (err) {
|
||||
res.status(err.statusCode || 500).json(err.payload || { error: err.message });
|
||||
if (!repo) {
|
||||
return res.status(400).json({ error: 'repo required' });
|
||||
}
|
||||
});
|
||||
|
||||
app.get('/api/deploy-targets', (req, res) => {
|
||||
const config = loadDeployTargetsConfig();
|
||||
const targets = config.targets.map((entry) => ({
|
||||
repo: entry.repo,
|
||||
branch: entry.branch || 'main',
|
||||
target: entry.target || 'default',
|
||||
description: entry.description || '',
|
||||
cwd: entry.cwd || config.defaults.cwd || '',
|
||||
command: entry.command || [],
|
||||
has_healthcheck: Boolean(entry.healthcheck || config.defaults.healthcheck),
|
||||
}));
|
||||
res.json({
|
||||
config_path: config.path,
|
||||
count: targets.length,
|
||||
targets,
|
||||
const [owner, repoName] = repo.includes('/') ? repo.split('/') : ['d-bis', repo];
|
||||
const commitSha = sha || '';
|
||||
|
||||
if (commitSha && GITEA_TOKEN) {
|
||||
await setGiteaCommitStatus(owner, repoName, commitSha, 'pending', 'Phoenix deployment in progress');
|
||||
}
|
||||
|
||||
console.log(`[deploy] ${repo} branch=${branch} target=${target || 'default'} sha=${commitSha}`);
|
||||
// Stub: no real deploy yet — report success so Gitea shows green; replace with real deploy + setGiteaCommitStatus on completion
|
||||
const deploySuccess = true;
|
||||
if (commitSha && GITEA_TOKEN) {
|
||||
await setGiteaCommitStatus(
|
||||
owner,
|
||||
repoName,
|
||||
commitSha,
|
||||
deploySuccess ? 'success' : 'failure',
|
||||
deploySuccess ? 'Deploy accepted (stub)' : 'Deploy failed (stub)'
|
||||
);
|
||||
}
|
||||
res.status(202).json({
|
||||
status: 'accepted',
|
||||
repo,
|
||||
branch,
|
||||
target: target || 'default',
|
||||
message: 'Deploy request queued (stub). Implement full deploy logic in Sankofa Phoenix API.',
|
||||
});
|
||||
|
||||
if (PHOENIX_WEBHOOK_URL) {
|
||||
const payload = { event: 'deploy.completed', repo, branch, target: target || 'default', sha: commitSha, success: deploySuccess };
|
||||
const body = JSON.stringify(payload);
|
||||
const sig = crypto.createHmac('sha256', PHOENIX_WEBHOOK_SECRET || '').update(body).digest('hex');
|
||||
fetch(PHOENIX_WEBHOOK_URL, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', 'X-Phoenix-Signature': `sha256=${sig}` },
|
||||
body,
|
||||
}).catch((e) => console.error('[webhook] outbound failed', e.message));
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
@@ -895,10 +474,7 @@ app.listen(PORT, () => {
|
||||
if (!GITEA_TOKEN) console.warn('GITEA_TOKEN not set — commit status updates disabled');
|
||||
if (!hasProxmox) console.warn('PROXMOX_* not set — Infra/VE API returns stub data');
|
||||
if (PHOENIX_WEBHOOK_URL) console.log('Outbound webhook enabled:', PHOENIX_WEBHOOK_URL);
|
||||
if (WEBHOOK_DEPLOY_ENABLED) console.log('Inbound webhook deploy execution enabled');
|
||||
if (PARTNER_KEYS.length > 0) console.log('Partner API key auth enabled for /api/v1/* (except GET /api/v1/public-sector/programs)');
|
||||
const mpath = resolvePublicSectorManifestPath();
|
||||
const dpath = resolveDeployTargetsPath();
|
||||
console.log(`Public-sector manifest: ${mpath} (${existsSync(mpath) ? 'ok' : 'missing'})`);
|
||||
console.log(`Deploy targets: ${dpath} (${existsSync(dpath) ? 'ok' : 'missing'})`);
|
||||
});
|
||||
|
||||
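The outbound `deploy.completed` webhook above signs the raw JSON body with HMAC-SHA256 and sends the hex digest as `X-Phoenix-Signature: sha256=<hex>`. A minimal receiver-side verification sketch (the argument handling is illustrative; only the signing scheme comes from the code above):

```bash
#!/usr/bin/env bash
# Verify a Phoenix outbound webhook: SHA-256 HMAC over the raw body, hex-encoded.
set -euo pipefail
BODY_FILE="$1"      # raw request body, byte-for-byte as received
HEADER_SIG="$2"     # value of the X-Phoenix-Signature header, e.g. sha256=ab12...
: "${PHOENIX_WEBHOOK_SECRET:?}"

expected="sha256=$(openssl dgst -sha256 -hmac "$PHOENIX_WEBHOOK_SECRET" -r < "$BODY_FILE" | awk '{print $1}')"
if [[ "$expected" == "$HEADER_SIG" ]]; then
  echo "signature ok"
else
  echo "signature mismatch" >&2
  exit 1
fi
```
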
1332  reports/status/gru_v2_full_mesh_status_report.json  (new file; diff suppressed because it is too large)
234   reports/status/gru_v2_full_mesh_tracker.csv  (new file)
@@ -0,0 +1,234 @@
|
||||
namespace,chain,pair,priority,status
|
||||
c* V2,138,cUSDT V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cUSDT V2 / USDT,P0,todo
|
||||
c* V2,138,cUSDC V2 / USDC,P0,todo
|
||||
c* V2,138,cEURC V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cEURT V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cGBPC V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cGBPT V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cAUDC V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cJPYC V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cCHFC V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cCADC V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cXAUC V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cXAUT V2 / cUSDC V2,P0,todo
|
||||
c* V2,138,cEURC V2 / cEURT V2,P1,todo
|
||||
c* V2,138,cGBPC V2 / cGBPT V2,P1,todo
|
||||
c* V2,138,cXAUC V2 / cXAUT V2,P1,todo
|
||||
c* V2,138,cETH / WETH,P1,todo
|
||||
c* V2,138,cETH / cUSDC V2,P1,todo
|
||||
c* V2,138,cETHL2 / cUSDC V2,P2,todo
|
||||
c* V2,138,cBNB / cUSDC V2,P2,todo
|
||||
c* V2,138,cPOL / cUSDC V2,P2,todo
|
||||
c* V2,138,cAVAX / cUSDC V2,P2,todo
|
||||
c* V2,138,cCRO / cUSDC V2,P2,todo
|
||||
c* V2,138,cXDAI / cUSDC V2,P2,todo
|
||||
c* V2,138,cCELO / cUSDC V2,P2,todo
|
||||
c* V2,138,cWEMIX / cUSDC V2,P2,todo
|
||||
cA*,651940,cAUSDT / cAUSDC,P0,todo
|
||||
cA*,651940,cAUSDT / AUSDT,P0,todo
|
||||
cA*,651940,cAUSDC / USDC,P0,todo
|
||||
cA*,651940,cAEURC / cAUSDC,P0,todo
|
||||
cA*,651940,cAEURT / cAUSDC,P0,todo
|
||||
cA*,651940,cAGBPC / cAUSDC,P0,todo
|
||||
cA*,651940,cAGBPT / cAUSDC,P0,todo
|
||||
cA*,651940,cAAUDC / cAUSDC,P0,todo
|
||||
cA*,651940,cAJPYC / cAUSDC,P0,todo
|
||||
cA*,651940,cACHFC / cAUSDC,P0,todo
|
||||
cA*,651940,cACADC / cAUSDC,P0,todo
|
||||
cA*,651940,cAXAUC / cAUSDC,P0,todo
|
||||
cA*,651940,cAXAUT / cAUSDC,P0,todo
|
||||
cA*,651940,cAEURC / cAEURT,P1,todo
|
||||
cA*,651940,cAGBPC / cAGBPT,P1,todo
|
||||
cA*,651940,cAXAUC / cAXAUT,P1,todo
|
||||
cA*,651940,cAETH / WETH,P1,todo
|
||||
cA*,651940,cAETH / cAUSDC,P1,todo
|
||||
cA*,651940,cAWALL / WALL,P1,todo
|
||||
cA*,651940,cAWALL / cAUSDC,P1,todo
|
||||
cW*,1,cWUSDT / USDC,,todo
|
||||
cW*,1,cWUSDC / USDC,,todo
|
||||
cW*,1,cWUSDT / USDT,,todo
|
||||
cW*,1,cWUSDC / USDT,,todo
|
||||
cW*,1,cWUSDT / cWUSDC,,todo
|
||||
cW*,1,cWEURC / USDC,,todo
|
||||
cW*,1,cWEURT / USDC,,todo
|
||||
cW*,1,cWGBPC / USDC,,todo
|
||||
cW*,1,cWGBPT / USDC,,todo
|
||||
cW*,1,cWAUDC / USDC,,todo
|
||||
cW*,1,cWJPYC / USDC,,todo
|
||||
cW*,1,cWCHFC / USDC,,todo
|
||||
cW*,1,cWCADC / USDC,,todo
|
||||
cW*,1,cWXAUC / USDC,,todo
|
||||
cW*,1,cWXAUT / USDC,,todo
|
||||
cW*,1,cWETH / WETH,,todo
|
||||
cW*,1,cWETH / USDC,,todo
|
||||
cW*,10,cWUSDT / USDC,,todo
|
||||
cW*,10,cWUSDC / USDC,,todo
|
||||
cW*,10,cWUSDT / USDT,,todo
|
||||
cW*,10,cWUSDC / USDT,,todo
|
||||
cW*,10,cWUSDT / cWUSDC,,todo
|
||||
cW*,10,cWEURC / USDC,,todo
|
||||
cW*,10,cWEURT / USDC,,todo
|
||||
cW*,10,cWGBPC / USDC,,todo
|
||||
cW*,10,cWGBPT / USDC,,todo
|
||||
cW*,10,cWAUDC / USDC,,todo
|
||||
cW*,10,cWJPYC / USDC,,todo
|
||||
cW*,10,cWCHFC / USDC,,todo
|
||||
cW*,10,cWCADC / USDC,,todo
|
||||
cW*,10,cWXAUC / USDC,,todo
|
||||
cW*,10,cWXAUT / USDC,,todo
|
||||
cW*,10,cWETHL2 / WETH,,todo
|
||||
cW*,10,cWETHL2 / USDC,,todo
|
||||
cW*,25,cWUSDT / USDC,,todo
|
||||
cW*,25,cWUSDC / USDC,,todo
|
||||
cW*,25,cWUSDT / USDT,,todo
|
||||
cW*,25,cWUSDC / USDT,,todo
|
||||
cW*,25,cWUSDT / cWUSDC,,todo
|
||||
cW*,25,cWEURC / USDC,,todo
|
||||
cW*,25,cWEURT / USDC,,todo
|
||||
cW*,25,cWGBPC / USDC,,todo
|
||||
cW*,25,cWGBPT / USDC,,todo
|
||||
cW*,25,cWAUDC / USDC,,todo
|
||||
cW*,25,cWJPYC / USDC,,todo
|
||||
cW*,25,cWCHFC / USDC,,todo
|
||||
cW*,25,cWCADC / USDC,,todo
|
||||
cW*,25,cWXAUC / USDC,,todo
|
||||
cW*,25,cWXAUT / USDC,,todo
|
||||
cW*,25,cWCRO / WCRO,,todo
|
||||
cW*,25,cWCRO / USDT,,todo
|
||||
cW*,56,cWUSDT / USDC,,todo
|
||||
cW*,56,cWUSDC / USDC,,todo
|
||||
cW*,56,cWUSDT / USDT,,todo
|
||||
cW*,56,cWUSDC / USDT,,todo
|
||||
cW*,56,cWUSDT / cWUSDC,,todo
|
||||
cW*,56,cWEURC / USDC,,todo
|
||||
cW*,56,cWEURT / USDC,,todo
|
||||
cW*,56,cWGBPC / USDC,,todo
|
||||
cW*,56,cWGBPT / USDC,,todo
|
||||
cW*,56,cWAUDC / USDC,,todo
|
||||
cW*,56,cWJPYC / USDC,,todo
|
||||
cW*,56,cWCHFC / USDC,,todo
|
||||
cW*,56,cWCADC / USDC,,todo
|
||||
cW*,56,cWXAUC / USDC,,todo
|
||||
cW*,56,cWXAUT / USDC,,todo
|
||||
cW*,56,cWBNB / WBNB,,todo
|
||||
cW*,56,cWBNB / USDT,,todo
|
||||
cW*,100,cWUSDT / USDC,,todo
|
||||
cW*,100,cWUSDC / USDC,,todo
|
||||
cW*,100,cWUSDT / USDT,,todo
|
||||
cW*,100,cWUSDC / USDT,,todo
|
||||
cW*,100,cWUSDT / cWUSDC,,todo
|
||||
cW*,100,cWEURC / USDC,,todo
|
||||
cW*,100,cWEURT / USDC,,todo
|
||||
cW*,100,cWGBPC / USDC,,todo
|
||||
cW*,100,cWGBPT / USDC,,todo
|
||||
cW*,100,cWAUDC / USDC,,todo
|
||||
cW*,100,cWJPYC / USDC,,todo
|
||||
cW*,100,cWCHFC / USDC,,todo
|
||||
cW*,100,cWCADC / USDC,,todo
|
||||
cW*,100,cWXAUC / USDC,,todo
|
||||
cW*,100,cWXAUT / USDC,,todo
|
||||
cW*,100,cWXDAI / WXDAI,,todo
|
||||
cW*,100,cWXDAI / USDC,,todo
|
||||
cW*,137,cWUSDT / USDC,,todo
|
||||
cW*,137,cWUSDC / USDC,,todo
|
||||
cW*,137,cWUSDT / USDT,,todo
|
||||
cW*,137,cWUSDC / USDT,,todo
|
||||
cW*,137,cWUSDT / cWUSDC,,todo
|
||||
cW*,137,cWEURC / USDC,,todo
|
||||
cW*,137,cWEURT / USDC,,todo
|
||||
cW*,137,cWGBPC / USDC,,todo
|
||||
cW*,137,cWGBPT / USDC,,todo
|
||||
cW*,137,cWAUDC / USDC,,todo
|
||||
cW*,137,cWJPYC / USDC,,todo
|
||||
cW*,137,cWCHFC / USDC,,todo
|
||||
cW*,137,cWCADC / USDC,,todo
|
||||
cW*,137,cWXAUC / USDC,,todo
|
||||
cW*,137,cWXAUT / USDC,,todo
|
||||
cW*,137,cWPOL / WPOL,,todo
|
||||
cW*,137,cWPOL / USDC,,todo
|
||||
cW*,8453,cWUSDT / USDC,,todo
|
||||
cW*,8453,cWUSDC / USDC,,todo
|
||||
cW*,8453,cWUSDT / USDT,,todo
|
||||
cW*,8453,cWUSDC / USDT,,todo
|
||||
cW*,8453,cWUSDT / cWUSDC,,todo
|
||||
cW*,8453,cWEURC / USDC,,todo
|
||||
cW*,8453,cWEURT / USDC,,todo
|
||||
cW*,8453,cWGBPC / USDC,,todo
|
||||
cW*,8453,cWGBPT / USDC,,todo
|
||||
cW*,8453,cWAUDC / USDC,,todo
|
||||
cW*,8453,cWJPYC / USDC,,todo
|
||||
cW*,8453,cWCHFC / USDC,,todo
|
||||
cW*,8453,cWCADC / USDC,,todo
|
||||
cW*,8453,cWXAUC / USDC,,todo
|
||||
cW*,8453,cWXAUT / USDC,,todo
|
||||
cW*,8453,cWETHL2 / WETH,,todo
|
||||
cW*,8453,cWETHL2 / USDC,,todo
|
||||
cW*,42161,cWUSDT / USDC,,todo
|
||||
cW*,42161,cWUSDC / USDC,,todo
|
||||
cW*,42161,cWUSDT / USDT,,todo
|
||||
cW*,42161,cWUSDC / USDT,,todo
|
||||
cW*,42161,cWUSDT / cWUSDC,,todo
|
||||
cW*,42161,cWEURC / USDC,,todo
|
||||
cW*,42161,cWEURT / USDC,,todo
|
||||
cW*,42161,cWGBPC / USDC,,todo
|
||||
cW*,42161,cWGBPT / USDC,,todo
|
||||
cW*,42161,cWAUDC / USDC,,todo
|
||||
cW*,42161,cWJPYC / USDC,,todo
|
||||
cW*,42161,cWCHFC / USDC,,todo
|
||||
cW*,42161,cWCADC / USDC,,todo
|
||||
cW*,42161,cWXAUC / USDC,,todo
|
||||
cW*,42161,cWXAUT / USDC,,todo
|
||||
cW*,42161,cWETHL2 / WETH,,todo
|
||||
cW*,42161,cWETHL2 / USDC,,todo
|
||||
cW*,42220,cWUSDT / USDC,,todo
|
||||
cW*,42220,cWUSDC / USDC,,todo
|
||||
cW*,42220,cWUSDT / USDT,,todo
|
||||
cW*,42220,cWUSDC / USDT,,todo
|
||||
cW*,42220,cWUSDT / cWUSDC,,todo
|
||||
cW*,42220,cWEURC / USDC,,todo
|
||||
cW*,42220,cWEURT / USDC,,todo
|
||||
cW*,42220,cWGBPC / USDC,,todo
|
||||
cW*,42220,cWGBPT / USDC,,todo
|
||||
cW*,42220,cWAUDC / USDC,,todo
|
||||
cW*,42220,cWJPYC / USDC,,todo
|
||||
cW*,42220,cWCHFC / USDC,,todo
|
||||
cW*,42220,cWCADC / USDC,,todo
|
||||
cW*,42220,cWXAUC / USDC,,todo
|
||||
cW*,42220,cWXAUT / USDC,,todo
|
||||
cW*,42220,cWCELO / WCELO,,todo
|
||||
cW*,42220,cWCELO / USDC,,todo
|
||||
cW*,43114,cWUSDT / USDC,,todo
|
||||
cW*,43114,cWUSDC / USDC,,todo
|
||||
cW*,43114,cWUSDT / USDT,,todo
|
||||
cW*,43114,cWUSDC / USDT,,todo
|
||||
cW*,43114,cWUSDT / cWUSDC,,todo
|
||||
cW*,43114,cWEURC / USDC,,todo
|
||||
cW*,43114,cWEURT / USDC,,todo
|
||||
cW*,43114,cWGBPC / USDC,,todo
|
||||
cW*,43114,cWGBPT / USDC,,todo
|
||||
cW*,43114,cWAUDC / USDC,,todo
|
||||
cW*,43114,cWJPYC / USDC,,todo
|
||||
cW*,43114,cWCHFC / USDC,,todo
|
||||
cW*,43114,cWCADC / USDC,,todo
|
||||
cW*,43114,cWXAUC / USDC,,todo
|
||||
cW*,43114,cWXAUT / USDC,,todo
|
||||
cW*,43114,cWAVAX / WAVAX,,todo
|
||||
cW*,43114,cWAVAX / USDC,,todo
|
||||
cW*,1111,cWUSDT / USDC,,planned
|
||||
cW*,1111,cWUSDC / USDC,,planned
|
||||
cW*,1111,cWUSDT / USDT,,planned
|
||||
cW*,1111,cWUSDC / USDT,,planned
|
||||
cW*,1111,cWUSDT / cWUSDC,,planned
|
||||
cW*,1111,cWEURC / USDC,,planned
|
||||
cW*,1111,cWEURT / USDC,,planned
|
||||
cW*,1111,cWGBPC / USDC,,planned
|
||||
cW*,1111,cWGBPT / USDC,,planned
|
||||
cW*,1111,cWAUDC / USDC,,planned
|
||||
cW*,1111,cWJPYC / USDC,,planned
|
||||
cW*,1111,cWCHFC / USDC,,planned
|
||||
cW*,1111,cWCADC / USDC,,planned
|
||||
cW*,1111,cWXAUC / USDC,,planned
|
||||
cW*,1111,cWXAUT / USDC,,planned
|
||||
cW*,1111,cWWEMIX / WWEMIX,,planned
|
||||
cW*,1111,cWWEMIX / USDC,,planned
|
||||
|
128  scripts/deployment/configure-gru-v2-mainnet-bridge-parity.sh  (new executable file)
@@ -0,0 +1,128 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" >/dev/null 2>&1 || true
source "${PROJECT_ROOT}/smom-dbis-138/scripts/load-env.sh" >/dev/null 2>&1 || true

need_cmd() {
  command -v "$1" >/dev/null 2>&1 || { echo "[fail] missing required command: $1" >&2; exit 1; }
}

need_cmd cast

L1_BRIDGE="${CW_MULTITOKEN_BRIDGE_L1_138:-${CW_L1_BRIDGE_CHAIN138:-${CHAIN138_L1_BRIDGE:-0x152ed3e9912161b76bdfd368d0c84b7c31c10de7}}}"
L2_BRIDGE="${CW_MULTITOKEN_BRIDGE_L2_MAINNET:-${CW_BRIDGE_MAINNET:-0x2bF74583206A49Be07E0E8A94197C12987AbD7B5}}"
SELECTOR="${ETH_MAINNET_SELECTOR:-5009297550715157269}"
RPC138="${RPC_URL_138:-}"
RPC1="${ETHEREUM_MAINNET_RPC:-${ETH_MAINNET_RPC_URL:-}}"

CUSDT_V2="${COMPLIANT_USDT_V2:-0x9FBfab33882Efe0038DAa608185718b772EE5660}"
CUSDC_V2="${COMPLIANT_USDC_V2:-0x219522c60e83dEe01FC5b0329d6fA8fD84b9D13d}"
CWUSDT="${CWUSDT_V2_MAINNET:-${CWUSDT_MAINNET:-0x7E8FF0DcC974F290a29968e9350800a6df674447}}"
CWUSDC="${CWUSDC_V2_MAINNET:-${CWUSDC_MAINNET:-0x3398ff0Bc56Fe3597E12BE6b191Cc92f10Eae53c}}"

ASSET="ALL"
EXECUTE=0

usage() {
  cat <<'EOF'
Usage:
  bash scripts/deployment/configure-gru-v2-mainnet-bridge-parity.sh [--asset cUSDT_V2|cUSDC_V2] [--execute]

Dry-run by default. With --execute, broadcasts:
  1. L2 configureDestination(138, L1_BRIDGE, true)
  2. L2 configureTokenPair(V2 canonical, cW mirrored)
  3. L1 configureDestination(V2 canonical, MAINNET_SELECTOR, L2_BRIDGE, true)
  4. L1 configureSupportedCanonicalToken(V2 canonical, true) only when the deployed L1 bridge supports it
EOF
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    --asset) ASSET="${2:-}"; shift 2 ;;
    --execute) EXECUTE=1; shift ;;
    --help|-h) usage; exit 0 ;;
    *) echo "[fail] unknown arg: $1" >&2; usage >&2; exit 2 ;;
  esac
done

[[ -n "$RPC138" && -n "$RPC1" ]] || { echo "[fail] RPC_URL_138 and ETHEREUM_MAINNET_RPC are required" >&2; exit 1; }
if (( EXECUTE == 1 )); then
  [[ -n "${PRIVATE_KEY:-}" ]] || { echo "[fail] PRIVATE_KEY is required for --execute" >&2; exit 1; }
fi

# In --execute mode broadcast the transaction; otherwise print the equivalent
# cast invocation so the dry-run plan is copy-pasteable.
send_cast() {
  local rpc="$1" to="$2" sig="$3"
  shift 3
  if (( EXECUTE == 1 )); then
    cast send "$to" "$sig" "$@" --rpc-url "$rpc" --private-key "$PRIVATE_KEY" --legacy
  else
    printf 'cast send %q %q' "$to" "$sig"
    for part in "$@"; do
      printf ' %q' "$part"
    done
    printf ' --rpc-url %q --private-key "$PRIVATE_KEY" --legacy\n' "$rpc"
  fi
}

print_state() {
  local label="$1" canonical="$2"
  echo "=== $label ==="
  echo "canonical=$canonical"
  echo "l1_destination=$(cast call "$L1_BRIDGE" 'destinations(address,uint64)((address,bool))' "$canonical" "$SELECTOR" --rpc-url "$RPC138" 2>/dev/null | tr '\n' ' ' || true)"
  echo "l1_supported=$(cast call "$L1_BRIDGE" 'supportedCanonicalToken(address)(bool)' "$canonical" --rpc-url "$RPC138" 2>/dev/null | awk '{print $1}' || true)"
  echo "l2_pair=$(cast call "$L2_BRIDGE" 'canonicalToMirrored(address)(address)' "$canonical" --rpc-url "$RPC1" 2>/dev/null | awk '{print $1}' || true)"
  echo "l2_destination=$(cast call "$L2_BRIDGE" 'destinations(uint64)((address,bool))' 138 --rpc-url "$RPC1" 2>/dev/null | tr '\n' ' ' || true)"
}

l1_supports_supported_canonical_fn() {
  local canonical="$1"
  cast call "$L1_BRIDGE" 'supportedCanonicalToken(address)(bool)' "$canonical" --rpc-url "$RPC138" >/dev/null 2>&1
}

run_asset() {
  local label="$1" canonical="$2" mirrored="$3"
  print_state "$label" "$canonical"
  echo "plan_l2_destination:"
  send_cast "$RPC1" "$L2_BRIDGE" "configureDestination(uint64,address,bool)" 138 "$L1_BRIDGE" true
  echo "plan_l2_pair:"
  send_cast "$RPC1" "$L2_BRIDGE" "configureTokenPair(address,address)" "$canonical" "$mirrored"
  echo "plan_l1_destination:"
  send_cast "$RPC138" "$L1_BRIDGE" "configureDestination(address,uint64,address,bool)" "$canonical" "$SELECTOR" "$L2_BRIDGE" true
  if l1_supports_supported_canonical_fn "$canonical"; then
    echo "plan_l1_supported:"
    send_cast "$RPC138" "$L1_BRIDGE" "configureSupportedCanonicalToken(address,bool)" "$canonical" true
  else
    echo "plan_l1_supported: skipped (deployed L1 bridge does not expose supportedCanonicalToken(address)(bool); destination + fee-quote path is authoritative)"
  fi
  echo
}

case "$ASSET" in
  ALL)
    run_asset "cUSDT_V2" "$CUSDT_V2" "$CWUSDT"
    run_asset "cUSDC_V2" "$CUSDC_V2" "$CWUSDC"
    ;;
  cUSDT_V2)
    run_asset "cUSDT_V2" "$CUSDT_V2" "$CWUSDT"
    ;;
  cUSDC_V2)
    run_asset "cUSDC_V2" "$CUSDC_V2" "$CWUSDC"
    ;;
  *)
    echo "[fail] unsupported asset: $ASSET" >&2
    exit 2
    ;;
esac

if (( EXECUTE == 0 )); then
  echo "Dry-run only. Re-run with --execute to broadcast."
fi
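A typical operator flow for the script above, sketched with placeholder endpoints; key handling is left to the operator's own secret management:

```bash
# Dry run first: prints the exact cast send commands without broadcasting.
export RPC_URL_138="https://rpc.chain138.example"       # placeholder endpoint
export ETHEREUM_MAINNET_RPC="https://eth.example/rpc"   # placeholder endpoint
bash scripts/deployment/configure-gru-v2-mainnet-bridge-parity.sh --asset cUSDT_V2

# Broadcast the same plan once the printed state looks right.
export PRIVATE_KEY="0x..."                              # operator key; never commit
bash scripts/deployment/configure-gru-v2-mainnet-bridge-parity.sh --asset cUSDT_V2 --execute
```
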
@@ -1,152 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
SUBMODULE_ROOT="$PROJECT_ROOT/atomic-swap-dapp"
|
||||
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
|
||||
PROXMOX_HOST="${PROXMOX_DAPP_HOST:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
|
||||
VMID="${VMID:-5801}"
|
||||
DEPLOY_ROOT="${DEPLOY_ROOT:-/var/www/atomic-swap}"
|
||||
TMP_ARCHIVE="/tmp/atomic-swap-dapp-5801.tgz"
|
||||
DIST_DIR="$SUBMODULE_ROOT/dist"
|
||||
SKIP_BUILD="${SKIP_BUILD:-0}"
|
||||
SSH_OPTS="${SSH_OPTS:--o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new}"
|
||||
|
||||
cleanup() {
|
||||
rm -f "$TMP_ARCHIVE"
|
||||
}
|
||||
|
||||
trap cleanup EXIT
|
||||
|
||||
if [ ! -d "$SUBMODULE_ROOT" ]; then
|
||||
echo "Missing submodule at $SUBMODULE_ROOT" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "$SUBMODULE_ROOT"
|
||||
if [ "$SKIP_BUILD" != "1" ]; then
|
||||
if [ -f package-lock.json ]; then
|
||||
npm ci >/dev/null
|
||||
else
|
||||
npm install >/dev/null
|
||||
fi
|
||||
npm run sync:ecosystem >/dev/null
|
||||
npm run validate:manifest >/dev/null
|
||||
npm run build >/dev/null
|
||||
fi
|
||||
|
||||
for required_path in \
|
||||
"$DIST_DIR/index.html" \
|
||||
"$DIST_DIR/data/ecosystem-manifest.json" \
|
||||
"$DIST_DIR/data/live-route-registry.json" \
|
||||
"$DIST_DIR/data/deployed-venue-inventory.json"; do
|
||||
if [ ! -f "$required_path" ]; then
|
||||
echo "Missing required build artifact: $required_path" >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
jq -e '.supportedNetworks[] | select(.chainId == 138) | .deployedVenuePoolCount >= 19 and .publicRoutingPoolCount >= 19' \
|
||||
"$DIST_DIR/data/ecosystem-manifest.json" >/dev/null
|
||||
jq -e '.liveSwapRoutes | length >= 19' "$DIST_DIR/data/live-route-registry.json" >/dev/null
|
||||
jq -e '.liveBridgeRoutes | length >= 12' "$DIST_DIR/data/live-route-registry.json" >/dev/null
|
||||
jq -e '.networks[] | select(.chainId == 138) | .venueCounts.deployedVenuePoolCount >= 19 and .summary.totalVenues >= 19' \
|
||||
"$DIST_DIR/data/deployed-venue-inventory.json" >/dev/null
|
||||
|
||||
rm -f "$TMP_ARCHIVE"
|
||||
tar -C "$SUBMODULE_ROOT" -czf "$TMP_ARCHIVE" dist
|
||||
|
||||
ssh $SSH_OPTS "root@$PROXMOX_HOST" true
|
||||
scp -q $SSH_OPTS "$TMP_ARCHIVE" "root@$PROXMOX_HOST:/tmp/atomic-swap-dapp-5801.tgz"
|
||||
|
||||
ssh $SSH_OPTS "root@$PROXMOX_HOST" "
|
||||
set -euo pipefail
|
||||
pct push $VMID /tmp/atomic-swap-dapp-5801.tgz /tmp/atomic-swap-dapp-5801.tgz
|
||||
pct exec $VMID -- bash -lc '
|
||||
set -euo pipefail
|
||||
mkdir -p \"$DEPLOY_ROOT\"
|
||||
find \"$DEPLOY_ROOT\" -mindepth 1 -maxdepth 1 -exec rm -rf {} +
|
||||
rm -rf /tmp/dist
|
||||
tar -xzf /tmp/atomic-swap-dapp-5801.tgz -C /tmp
|
||||
cp -R /tmp/dist/. \"$DEPLOY_ROOT/\"
|
||||
mkdir -p /var/cache/nginx/atomic-swap-api
|
||||
cat > /etc/nginx/conf.d/atomic-swap-api-cache.conf <<\"EOF\"
|
||||
proxy_cache_path /var/cache/nginx/atomic-swap-api
|
||||
levels=1:2
|
||||
keys_zone=atomic_swap_api_cache:10m
|
||||
max_size=256m
|
||||
inactive=30m
|
||||
use_temp_path=off;
|
||||
EOF
|
||||
cat > /etc/nginx/sites-available/atomic-swap <<\"EOF\"
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
server_name _;
|
||||
|
||||
root $DEPLOY_ROOT;
|
||||
index index.html;
|
||||
|
||||
location / {
|
||||
try_files \$uri \$uri/ /index.html;
|
||||
}
|
||||
|
||||
location = /index.html {
|
||||
add_header Cache-Control \"no-store, no-cache, must-revalidate\" always;
|
||||
}
|
||||
|
||||
location /data/ {
|
||||
add_header Cache-Control \"no-store, no-cache, must-revalidate\" always;
|
||||
}
|
||||
|
||||
location /assets/ {
|
||||
add_header Cache-Control \"public, max-age=31536000, immutable\" always;
|
||||
}
|
||||
|
||||
location /api/v1/ {
|
||||
proxy_pass https://explorer.d-bis.org/api/v1/;
|
||||
proxy_ssl_server_name on;
|
||||
proxy_set_header Host explorer.d-bis.org;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Host \$host;
|
||||
proxy_http_version 1.1;
|
||||
proxy_buffering on;
|
||||
proxy_cache atomic_swap_api_cache;
|
||||
proxy_cache_methods GET HEAD;
|
||||
proxy_cache_key \"\$scheme\$proxy_host\$request_uri\";
|
||||
proxy_cache_lock on;
|
||||
proxy_cache_lock_timeout 10s;
|
||||
proxy_cache_lock_age 10s;
|
||||
proxy_cache_background_update on;
|
||||
proxy_cache_revalidate on;
|
||||
proxy_cache_valid 200 10s;
|
||||
proxy_cache_valid 404 1s;
|
||||
proxy_cache_valid any 0;
|
||||
proxy_cache_use_stale error timeout invalid_header updating http_429 http_500 http_502 http_503 http_504;
|
||||
add_header X-Atomic-Swap-Cache \$upstream_cache_status always;
|
||||
}
|
||||
}
|
||||
EOF
|
||||
ln -sfn /etc/nginx/sites-available/atomic-swap /etc/nginx/sites-enabled/atomic-swap
|
||||
rm -f /etc/nginx/sites-enabled/default
|
||||
rm -f /etc/nginx/sites-enabled/dapp
|
||||
nginx -t
|
||||
systemctl reload nginx
|
||||
curl -fsS http://127.0.0.1/index.html >/dev/null
|
||||
curl -fsS http://127.0.0.1/data/ecosystem-manifest.json >/dev/null
|
||||
curl -fsS http://127.0.0.1/data/live-route-registry.json >/dev/null
|
||||
curl -fsS http://127.0.0.1/data/deployed-venue-inventory.json >/dev/null
|
||||
rm -rf /tmp/dist /tmp/atomic-swap-dapp-5801.tgz
|
||||
'
|
||||
rm -f /tmp/atomic-swap-dapp-5801.tgz
|
||||
"
|
||||
|
||||
curl -fsS https://atomic-swap.defi-oracle.io/ >/dev/null
|
||||
curl -fsS https://atomic-swap.defi-oracle.io/data/ecosystem-manifest.json | jq -e '.supportedNetworks[] | select(.chainId == 138) | .deployedVenuePoolCount >= 19 and .publicRoutingPoolCount >= 19' >/dev/null
|
||||
curl -fsS https://atomic-swap.defi-oracle.io/data/live-route-registry.json | jq -e '.liveSwapRoutes | length >= 19' >/dev/null
|
||||
curl -fsS https://atomic-swap.defi-oracle.io/data/live-route-registry.json | jq -e '.liveBridgeRoutes | length >= 12' >/dev/null
|
||||
curl -fsS https://atomic-swap.defi-oracle.io/data/deployed-venue-inventory.json | jq -e '.networks[] | select(.chainId == 138) | .venueCounts.deployedVenuePoolCount >= 19 and .summary.totalVenues >= 19' >/dev/null
|
||||
|
||||
echo "Deployed atomic-swap-dapp to VMID $VMID via $PROXMOX_HOST"
|
||||
@@ -1,244 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
source "$PROJECT_ROOT/scripts/lib/load-project-env.sh"
|
||||
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
|
||||
|
||||
PHOENIX_DEPLOY_WORKSPACE="${PHOENIX_DEPLOY_WORKSPACE:-}"
|
||||
PROXMOX_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
|
||||
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-root}"
|
||||
VMID="${CURRENCICOMBO_PHOENIX_VMID:-8604}"
|
||||
CT_IP="${IP_CURRENCICOMBO_PHOENIX:-10.160.0.14}"
|
||||
CT_REPO_DIR="${CT_REPO_DIR:-/var/lib/currencicombo/repo}"
|
||||
PUBLIC_URL="${PUBLIC_URL:-https://curucombo.xn--vov0g.com}"
|
||||
PUBLIC_DOMAIN="${PUBLIC_DOMAIN:-curucombo.xn--vov0g.com}"
|
||||
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS:-192.168.11.167}:81}"
|
||||
NPM_EMAIL="${NPM_EMAIL:-}"
|
||||
NPM_PASSWORD="${NPM_PASSWORD:-}"
|
||||
DRY_RUN=0
|
||||
|
||||
usage() {
  cat <<'USAGE'
Usage: phoenix-deploy-currencicombo-from-workspace.sh [--dry-run]

Requires:
  PHOENIX_DEPLOY_WORKSPACE   Full staged CurrenciCombo checkout prepared by phoenix-deploy-api

This script:
  1. Packs the staged repo workspace.
  2. Pushes it into CT 8604 on r630-01.
  3. Ensures host prerequisites, install.sh, prune cron, and deploy script run in-CT.
  4. Updates the public NPMplus host so /api/* preserves the full path and supports SSE.
  5. Verifies the public portal + /api/ready end to end.
USAGE
}

while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--dry-run) DRY_RUN=1; shift ;;
|
||||
-h|--help) usage; exit 0 ;;
|
||||
*) echo "unknown arg: $1" >&2; usage; exit 2 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
log() { printf '[currencicombo-phoenix] %s\n' "$*" >&2; }
|
||||
die() { printf '[currencicombo-phoenix][FATAL] %s\n' "$*" >&2; exit 1; }
|
||||
run() { if [[ "$DRY_RUN" -eq 1 ]]; then printf '[dry-run] %s\n' "$*" >&2; else eval "$*"; fi; }
|
||||
need_cmd() { command -v "$1" >/dev/null 2>&1 || die "missing required command: $1"; }
|
||||
|
||||
for cmd in ssh scp tar curl jq mktemp; do
|
||||
need_cmd "$cmd"
|
||||
done
|
||||
|
||||
[[ -n "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "PHOENIX_DEPLOY_WORKSPACE is required"
|
||||
[[ -d "$PHOENIX_DEPLOY_WORKSPACE" ]] || die "staged workspace missing: $PHOENIX_DEPLOY_WORKSPACE"
|
||||
|
||||
if [[ "$DRY_RUN" -eq 0 ]]; then
|
||||
[[ -n "$NPM_EMAIL" ]] || die "NPM_EMAIL is required"
|
||||
[[ -n "$NPM_PASSWORD" ]] || die "NPM_PASSWORD is required"
|
||||
fi
|
||||
|
||||
SSH_TARGET="${PROXMOX_SSH_USER}@${PROXMOX_HOST}"
|
||||
SSH_OPTS=(-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)
|
||||
TMP_DIR="$(mktemp -d /tmp/currencicombo-phoenix-XXXXXX)"
|
||||
ARCHIVE_PATH="${TMP_DIR}/currencicombo-workspace.tgz"
|
||||
REMOTE_ARCHIVE="/tmp/$(basename "$ARCHIVE_PATH")"
|
||||
CT_ARCHIVE="/root/$(basename "$ARCHIVE_PATH")"
|
||||
NPM_COOKIE_JAR="${TMP_DIR}/npm-cookies.txt"
|
||||
cleanup() {
|
||||
rm -rf "$TMP_DIR"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
ssh_remote() {
|
||||
local cmd="$1"
|
||||
if [[ "$DRY_RUN" -eq 1 ]]; then
|
||||
printf '[dry-run] ssh %q %q\n' "$SSH_TARGET" "$cmd" >&2
|
||||
else
|
||||
ssh "${SSH_OPTS[@]}" "$SSH_TARGET" "$cmd"
|
||||
fi
|
||||
}
|
||||
|
||||
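# pct_exec_script: copies a local script to the Proxmox host, pushes it into the
# container with pct push, runs it with pct exec, and removes both temporary copies.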
pct_exec_script() {
  local local_script="$1"
  local remote_script
  local ct_script
  remote_script="/tmp/$(basename "$local_script")"
  ct_script="/root/$(basename "$local_script")"
  run "scp ${SSH_OPTS[*]} '$local_script' '${SSH_TARGET}:${remote_script}'"
  ssh_remote "pct push ${VMID} '${remote_script}' '${ct_script}' --perms 0755 && rm -f '${remote_script}' && pct exec ${VMID} -- bash '${ct_script}' && pct exec ${VMID} -- rm -f '${ct_script}'"
}

log "packing staged workspace from ${PHOENIX_DEPLOY_WORKSPACE}"
|
||||
run "tar -C '$PHOENIX_DEPLOY_WORKSPACE' --exclude='.git' --exclude='node_modules' --exclude='dist' --exclude='orchestrator/node_modules' --exclude='orchestrator/dist' -czf '$ARCHIVE_PATH' ."
|
||||
|
||||
log "ensuring CT ${VMID} is running on ${PROXMOX_HOST}"
|
||||
ssh_remote "pct start ${VMID} >/dev/null 2>&1 || true"
|
||||
|
||||
log "uploading staged archive to CT ${VMID}"
|
||||
run "scp ${SSH_OPTS[*]} '$ARCHIVE_PATH' '${SSH_TARGET}:${REMOTE_ARCHIVE}'"
|
||||
ssh_remote "pct push ${VMID} '${REMOTE_ARCHIVE}' '${CT_ARCHIVE}' && rm -f '${REMOTE_ARCHIVE}'"
|
||||
|
||||
CT_SCRIPT="${TMP_DIR}/currencicombo-ct-deploy.sh"
|
||||
cat > "$CT_SCRIPT" <<'EOF'
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
ARCHIVE_PATH="__CT_ARCHIVE__"
|
||||
REPO_DIR="__CT_REPO_DIR__"
|
||||
|
||||
need_pkg() {
|
||||
dpkg -s "$1" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
apt-get update -qq
|
||||
for pkg in ca-certificates curl git jq postgresql redis-server rsync build-essential; do
|
||||
need_pkg "$pkg" || apt-get install -y -qq "$pkg"
|
||||
done
|
||||
|
||||
if ! command -v node >/dev/null 2>&1 || ! node -v 2>/dev/null | grep -q '^v20\.'; then
|
||||
curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
|
||||
apt-get install -y -qq nodejs
|
||||
fi
|
||||
|
||||
systemctl enable --now postgresql >/dev/null 2>&1 || true
|
||||
systemctl enable --now redis-server >/dev/null 2>&1 || true
|
||||
|
||||
if [[ ! -f /root/currencicombo-prephoenix-archive.tgz && -d /opt/currencicombo ]]; then
|
||||
tar -czf /root/currencicombo-prephoenix-archive.tgz /opt/currencicombo /etc/currencicombo 2>/dev/null || true
|
||||
fi
|
||||
|
||||
install -d -o root -g root -m 0755 "$(dirname "$REPO_DIR")"
|
||||
rm -rf "$REPO_DIR"
|
||||
mkdir -p "$REPO_DIR"
|
||||
tar -xzf "$ARCHIVE_PATH" -C "$REPO_DIR"
|
||||
rm -f "$ARCHIVE_PATH"
|
||||
|
||||
bash "$REPO_DIR/scripts/deployment/install.sh"
|
||||
bash "$REPO_DIR/scripts/deployment/install-prune-cron.sh"
|
||||
CC_GIT_REF=local bash "$REPO_DIR/scripts/deployment/deploy-currencicombo-8604.sh"
|
||||
systemctl is-active currencicombo-orchestrator.service currencicombo-webapp.service
|
||||
curl -fsS http://127.0.0.1:8080/ready
|
||||
curl -fsS http://127.0.0.1:3000/ >/dev/null
|
||||
EOF
|
||||
perl -0pi -e "s|__CT_ARCHIVE__|${CT_ARCHIVE//|/\\|}|g; s|__CT_REPO_DIR__|${CT_REPO_DIR//|/\\|}|g" "$CT_SCRIPT"
|
||||
|
||||
log "running install + deploy inside CT ${VMID}"
|
||||
pct_exec_script "$CT_SCRIPT"
|
||||
|
||||
if [[ "$DRY_RUN" -eq 0 ]]; then
|
||||
log "updating NPMplus proxy host for ${PUBLIC_DOMAIN}"
|
||||
AUTH_JSON="$(jq -nc --arg identity "$NPM_EMAIL" --arg secret "$NPM_PASSWORD" '{identity:$identity,secret:$secret}')"
|
||||
TOKEN_RESPONSE="$(curl -sk -X POST "$NPM_URL/api/tokens" -H 'Content-Type: application/json' -d "$AUTH_JSON" -c "$NPM_COOKIE_JAR")"
|
||||
TOKEN="$(echo "$TOKEN_RESPONSE" | jq -r '.token // .accessToken // .access_token // .data.token // empty' 2>/dev/null)"
|
||||
USE_COOKIE_AUTH=0
|
||||
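# If no bearer token could be extracted from the response but it carries an
# "expires" field (a cookie-based session was established), fall back to
# cookie-jar authentication for the NPMplus API calls below; otherwise fail.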
if [[ -z "$TOKEN" || "$TOKEN" == "null" ]]; then
|
||||
if echo "$TOKEN_RESPONSE" | jq -e '.expires' >/dev/null 2>&1; then
|
||||
USE_COOKIE_AUTH=1
|
||||
else
|
||||
die "NPMplus authentication failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
npm_api() {
|
||||
if [[ "$USE_COOKIE_AUTH" -eq 1 ]]; then
|
||||
curl -sk -b "$NPM_COOKIE_JAR" "$@"
|
||||
else
|
||||
curl -sk -H "Authorization: Bearer $TOKEN" "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
HOSTS_JSON="$(npm_api -X GET "$NPM_URL/api/nginx/proxy-hosts")"
|
||||
HOST_ID="$(echo "$HOSTS_JSON" | jq -r --arg domain "$PUBLIC_DOMAIN" '
|
||||
(if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
|
||||
| map(select(.domain_names | type == "array"))
|
||||
| map(select(any(.domain_names[]; . == $domain)))
|
||||
| .[0].id // empty
|
||||
')"
|
||||
[[ -n "$HOST_ID" ]] || die "NPMplus proxy host not found for ${PUBLIC_DOMAIN}"
|
||||
|
||||
ADVANCED_CONFIG="$(cat <<CFG
|
||||
location ^~ /api/ {
|
||||
proxy_pass http://${CT_IP}:8080;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Host \$host;
|
||||
proxy_set_header X-Real-IP \$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
proxy_set_header Connection \"\";
|
||||
proxy_buffering off;
|
||||
proxy_cache off;
|
||||
proxy_read_timeout 24h;
|
||||
proxy_send_timeout 24h;
|
||||
add_header Cache-Control \"no-cache\";
|
||||
}
|
||||
CFG
|
||||
)"
|
||||
|
||||
PAYLOAD="$(echo "$HOSTS_JSON" | jq -c --arg domain "$PUBLIC_DOMAIN" --arg host "$CT_IP" --arg advanced "$ADVANCED_CONFIG" '
|
||||
(if type == "array" then . elif .data != null then .data elif .result != null then .result else [] end)
|
||||
| map(select(.domain_names | type == "array"))
|
||||
| map(select(any(.domain_names[]; . == $domain)))
|
||||
| .[0]
|
||||
| {
|
||||
domain_names,
|
||||
forward_scheme: (.forward_scheme // "http"),
|
||||
forward_host: $host,
|
||||
forward_port: 3000,
|
||||
access_list_id,
|
||||
certificate_id,
|
||||
ssl_forced,
|
||||
caching_enabled,
|
||||
block_exploits,
|
||||
advanced_config: $advanced,
|
||||
allow_websocket_upgrade,
|
||||
http2_support,
|
||||
hsts_enabled,
|
||||
hsts_subdomains,
|
||||
enabled
|
||||
}
|
||||
')"
|
||||
[[ -n "$PAYLOAD" && "$PAYLOAD" != "null" ]] || die "failed to build NPMplus update payload"
|
||||
UPDATE_RESPONSE="$(npm_api -X PUT "$NPM_URL/api/nginx/proxy-hosts/${HOST_ID}" -H 'Content-Type: application/json' -d "$PAYLOAD")"
|
||||
echo "$UPDATE_RESPONSE" | jq -e '.id != null' >/dev/null 2>&1 || die "NPMplus proxy host update failed"
|
||||
|
||||
log "running public smoke checks"
|
||||
HEADERS="$(curl -skI "$PUBLIC_URL/")"
|
||||
echo "$HEADERS" | grep -q '^HTTP/2 200' || die "public root is not HTTP 200"
|
||||
if echo "$HEADERS" | grep -qi '^x-nextjs-prerender:'; then
|
||||
die "old Next.js headers still present on public root"
|
||||
fi
|
||||
|
||||
curl -sk "$PUBLIC_URL/" | grep -F '<title>Solace Bank Group PLC — Treasury Management Portal</title>' >/dev/null || die "public title mismatch"
|
||||
READY_BODY="$(curl -sk "$PUBLIC_URL/api/ready")"
|
||||
echo "$READY_BODY" | grep -F '"ready":true' >/dev/null || die "public /api/ready failed"
|
||||
curl -skN --max-time 5 -H 'Accept: text/event-stream' "$PUBLIC_URL/api/plans/demo-pay-014/status/stream" | grep -F '"type":"connected"' >/dev/null || die "public SSE smoke failed"
|
||||
|
||||
log "capturing EXT-* blocker summary"
|
||||
ssh_remote "pct exec ${VMID} -- journalctl -u currencicombo-orchestrator.service -n 200 --no-pager | grep -E 'ExternalBlockers|EXT-' || true"
|
||||
fi
|
||||
|
||||
log "CurrenciCombo Phoenix deploy completed from ${PHOENIX_DEPLOY_WORKSPACE}"
|
||||
scripts/deployment/plan-gru-v2-wave1-public-pools.sh (new executable file, 335 lines)
@@ -0,0 +1,335 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
|
||||
OUTPUT_PATH="${ROOT_DIR}/reports/extraction/gru-v2-wave1-public-deploy-plan-latest.json"
|
||||
POLICY_PATH="${ROOT_DIR}/config/extraction/gru-v2-wave1-public-seed-policy.json"
|
||||
GAP_REPORT_PATH="${ROOT_DIR}/reports/extraction/gru-v2-wave1-public-gap-report-latest.json"
|
||||
|
||||
source "${ROOT_DIR}/scripts/lib/load-project-env.sh" >/dev/null 2>&1 || true
|
||||
if [[ -f "${ROOT_DIR}/smom-dbis-138/scripts/lib/deployment/dotenv.sh" ]]; then
|
||||
# shellcheck disable=SC1090
|
||||
source "${ROOT_DIR}/smom-dbis-138/scripts/lib/deployment/dotenv.sh" >/dev/null 2>&1 || true
|
||||
load_deployment_env --repo-root "${ROOT_DIR}/smom-dbis-138" >/dev/null 2>&1 || true
|
||||
export PROJECT_ROOT="${ROOT_DIR}"
|
||||
fi
|
||||
|
||||
mkdir -p "$(dirname "$OUTPUT_PATH")"
|
||||
|
||||
python3 - <<'PY' "$ROOT_DIR" "$OUTPUT_PATH" "$POLICY_PATH" "$GAP_REPORT_PATH"
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from collections import Counter
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
project_root = Path(sys.argv[1])
|
||||
output_path = Path(sys.argv[2])
|
||||
policy_path = Path(sys.argv[3])
|
||||
gap_report_path = Path(sys.argv[4])
|
||||
|
||||
if not gap_report_path.exists():
|
||||
subprocess.check_call(
|
||||
["bash", "scripts/verify/build-gru-v2-wave1-public-gap-report.sh"],
|
||||
cwd=project_root,
|
||||
)
|
||||
|
||||
deployment_status = json.loads((project_root / "cross-chain-pmm-lps/config/deployment-status.json").read_text())
|
||||
policy = json.loads(policy_path.read_text())
|
||||
gap_report = json.loads(gap_report_path.read_text())
|
||||
|
||||
chain_suffix = {
|
||||
1: "MAINNET",
|
||||
10: "OPTIMISM",
|
||||
25: "CRONOS",
|
||||
56: "BSC",
|
||||
100: "GNOSIS",
|
||||
137: "POLYGON",
|
||||
1111: "WEMIX",
|
||||
8453: "BASE",
|
||||
42161: "ARBITRUM",
|
||||
42220: "CELO",
|
||||
43114: "AVALANCHE",
|
||||
}
|
||||
|
||||
rpc_env_key = {
|
||||
1: "ETHEREUM_MAINNET_RPC",
|
||||
10: "OPTIMISM_RPC_URL",
|
||||
25: "CRONOS_RPC_URL",
|
||||
56: "BSC_RPC_URL",
|
||||
100: "GNOSIS_MAINNET_RPC",
|
||||
137: "POLYGON_RPC_URL",
|
||||
1111: "WEMIX_RPC",
|
||||
8453: "BASE_RPC_URL",
|
||||
42161: "ARBITRUM_RPC_URL",
|
||||
42220: "CELO_RPC_URL",
|
||||
43114: "AVALANCHE_RPC_URL",
|
||||
}
|
||||
|
||||
integration_env_key = {
|
||||
1: "DODO_PMM_INTEGRATION_MAINNET",
|
||||
10: "DODO_PMM_INTEGRATION_OPTIMISM",
|
||||
25: "DODO_PMM_INTEGRATION_CRONOS",
|
||||
56: "DODO_PMM_INTEGRATION_BSC",
|
||||
100: "DODO_PMM_INTEGRATION_GNOSIS",
|
||||
137: "DODO_PMM_INTEGRATION_POLYGON",
|
||||
1111: "DODO_PMM_INTEGRATION_WEMIX",
|
||||
8453: "DODO_PMM_INTEGRATION_BASE",
|
||||
42161: "DODO_PMM_INTEGRATION_ARBITRUM",
|
||||
42220: "DODO_PMM_INTEGRATION_CELO",
|
||||
43114: "DODO_PMM_INTEGRATION_AVALANCHE",
|
||||
}
|
||||
|
||||
private_key = os.environ.get("PRIVATE_KEY", "")
|
||||
live_checks = os.environ.get("GRU_WAVE1_PLAN_LIVE_CHECKS", "").strip().lower() in {"1", "true", "yes", "on"}
|
||||
include_allowances = os.environ.get("GRU_WAVE1_PLAN_INCLUDE_ALLOWANCES", "").strip().lower() in {"1", "true", "yes", "on"}
|
||||
call_timeout = max(1, int(os.environ.get("GRU_WAVE1_CALL_TIMEOUT_SEC", "2")))
|
||||
estimate_timeout = max(1, int(os.environ.get("GRU_WAVE1_ESTIMATE_TIMEOUT_SEC", "2")))
|
||||
deployer = ""
|
||||
if private_key and live_checks:
|
||||
try:
|
||||
deployer = subprocess.check_output(
|
||||
["cast", "wallet", "address", "--private-key", private_key],
|
||||
text=True,
|
||||
timeout=call_timeout,
|
||||
).strip()
|
||||
except Exception:
|
||||
deployer = ""
|
||||
|
||||
defaults = policy.get("defaults", {})
|
||||
symbol_defaults = policy.get("symbol_defaults", {})
|
||||
pair_overrides = policy.get("pair_overrides", {})
|
||||
|
||||
rows = []
|
||||
|
||||
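# merge_policy layers seed-policy settings from least to most specific:
# global defaults, then per-base-symbol defaults, then per-pair overrides.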
def merge_policy(base_symbol: str, pair_key: str):
    merged = dict(defaults)
    merged.update(symbol_defaults.get(base_symbol, {}))
    merged.update(pair_overrides.get(pair_key, {}))
    return merged

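# compute_quote_amount derives the quote-side seed as base_amount_raw * initial_price_e18
# divided by 1e18 using integer math, e.g. a base of 1000e18 at a 2e18 price yields a
# 2000e18 quote (this implicitly assumes base and quote share the same decimals).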
def compute_quote_amount(base_raw: str, price_e18: str):
    if not base_raw or not price_e18:
        return None
    return str((int(base_raw) * int(price_e18)) // 10**18)

def call_single(rpc_url: str, to: str, sig: str, *args):
|
||||
try:
|
||||
out = subprocess.check_output(
|
||||
["cast", "call", to, sig, *args, "--rpc-url", rpc_url],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
timeout=call_timeout,
|
||||
).strip()
|
||||
return out.split()[0] if out else ""
|
||||
except Exception:
|
||||
return ""
|
||||
|
||||
def estimate_single(rpc_url: str, to: str, sig: str, *args):
|
||||
try:
|
||||
out = subprocess.check_output(
|
||||
["cast", "estimate", to, sig, *args, "--rpc-url", rpc_url],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
timeout=estimate_timeout,
|
||||
).strip()
|
||||
return out.split()[0] if out else ""
|
||||
except Exception:
|
||||
return ""
|
||||
|
||||
for missing in gap_report["missing_first_tier_wave1_pools"]:
|
||||
chain_id = missing["chain_id"]
|
||||
network = missing["network"]
|
||||
pair = missing["pair"]
|
||||
base_symbol, quote_symbol = [part.strip() for part in pair.split("/")]
|
||||
suffix = chain_suffix.get(chain_id, "")
|
||||
rpc_key = rpc_env_key.get(chain_id, "")
|
||||
integration_key = integration_env_key.get(chain_id, "")
|
||||
base_env_key = f"{base_symbol.upper()}_{suffix}" if suffix else ""
|
||||
pair_key = pair.lower().replace("/", "-").replace(" ", "")
|
||||
|
||||
chain_state = deployment_status["chains"].get(str(chain_id), {})
|
||||
quote_address = (chain_state.get("anchorAddresses") or {}).get(quote_symbol, "")
|
||||
rpc_url = os.environ.get(rpc_key, "")
|
||||
integration = os.environ.get(integration_key, "")
|
||||
base_address = os.environ.get(base_env_key, "")
|
||||
|
||||
cfg = merge_policy(base_symbol, pair_key)
|
||||
initial_price = cfg.get("initial_price_e18")
|
||||
base_amount = str(cfg.get("base_amount_raw")) if cfg.get("base_amount_raw") is not None else None
|
||||
quote_amount = str(cfg.get("quote_amount_raw")) if cfg.get("quote_amount_raw") is not None else None
|
||||
if quote_amount is None and base_amount and initial_price:
|
||||
quote_amount = compute_quote_amount(base_amount, initial_price)
|
||||
mint_amount = str(cfg.get("mint_base_amount_raw")) if cfg.get("mint_base_amount_raw") is not None else None
|
||||
fee_bps = str(cfg.get("fee_bps", 3))
|
||||
k_value = str(cfg.get("k", "500000000000000000"))
|
||||
open_twap = bool(cfg.get("open_twap", False))
|
||||
price_mode = cfg.get("price_mode", "unspecified")
|
||||
|
||||
blockers = []
|
||||
if not rpc_url:
|
||||
blockers.append(f"missing_rpc_env:{rpc_key}")
|
||||
if not integration:
|
||||
blockers.append(f"missing_integration_env:{integration_key}")
|
||||
if not base_address:
|
||||
blockers.append(f"missing_base_token_env:{base_env_key}")
|
||||
if not quote_address:
|
||||
blockers.append(f"missing_quote_anchor:{quote_symbol}")
|
||||
if not initial_price:
|
||||
blockers.append("missing_initial_price_e18")
|
||||
if not base_amount:
|
||||
blockers.append("missing_base_amount_raw")
|
||||
if not quote_amount:
|
||||
blockers.append("missing_quote_amount_raw")
|
||||
if price_mode == "bootstrap_reference":
|
||||
blockers.append("bootstrap_price_requires_operator_review")
|
||||
if not private_key:
|
||||
blockers.append("missing_private_key")
|
||||
|
||||
existing_pool = ""
|
||||
base_balance = ""
|
||||
quote_balance = ""
|
||||
base_allowance = ""
|
||||
quote_allowance = ""
|
||||
mintable_base = False
|
||||
base_supply_mode = "unknown"
|
||||
if live_checks and rpc_url and integration and base_address and quote_address:
|
||||
existing_pool = call_single(rpc_url, integration, "pools(address,address)(address)", base_address, quote_address)
|
||||
if deployer:
|
||||
base_balance = call_single(rpc_url, base_address, "balanceOf(address)(uint256)", deployer)
|
||||
quote_balance = call_single(rpc_url, quote_address, "balanceOf(address)(uint256)", deployer)
|
||||
if include_allowances:
|
||||
base_allowance = call_single(rpc_url, base_address, "allowance(address,address)(uint256)", deployer, integration)
|
||||
quote_allowance = call_single(rpc_url, quote_address, "allowance(address,address)(uint256)", deployer, integration)
|
||||
|
||||
live_missing = existing_pool in ("", "0x0000000000000000000000000000000000000000")
|
||||
if not live_missing:
|
||||
blockers = [b for b in blockers if b not in {"missing_base_amount_raw", "missing_quote_amount_raw"}]
|
||||
|
||||
has_seed_amounts = bool(base_amount and quote_amount)
|
||||
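# Strict create-readiness requires the RPC URL, integration address, base token,
# quote anchor, initial price, and private key to be present and the pool to be
# missing on-chain; rows whose price came from bootstrap_reference are downgraded
# to "ready with bootstrap price" and left for operator review.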
create_ready = all(
|
||||
token not in blockers
|
||||
for token in [
|
||||
f"missing_rpc_env:{rpc_key}",
|
||||
f"missing_integration_env:{integration_key}",
|
||||
f"missing_base_token_env:{base_env_key}",
|
||||
f"missing_quote_anchor:{quote_symbol}",
|
||||
"missing_initial_price_e18",
|
||||
"missing_private_key",
|
||||
]
|
||||
) and live_missing
|
||||
|
||||
create_ready_with_bootstrap_price = create_ready
|
||||
create_ready = create_ready and price_mode != "bootstrap_reference"
|
||||
|
||||
seed_ready = create_ready_with_bootstrap_price and has_seed_amounts
|
||||
if base_balance and base_amount:
|
||||
if int(base_balance) >= int(base_amount):
|
||||
base_supply_mode = "wallet_balance"
|
||||
else:
|
||||
gap = int(base_amount) - int(base_balance)
|
||||
if live_checks and deployer:
|
||||
mintable_base = bool(estimate_single(rpc_url, base_address, "mint(address,uint256)", deployer, str(max(gap, 1))))
|
||||
if mintable_base and mint_amount and int(base_balance) + int(mint_amount) >= int(base_amount):
|
||||
base_supply_mode = "mintable_gap"
|
||||
else:
|
||||
base_supply_mode = "insufficient"
|
||||
blockers.append("insufficient_base_balance")
|
||||
if quote_balance and quote_amount and int(quote_balance) < int(quote_amount):
|
||||
blockers.append("insufficient_quote_balance")
|
||||
|
||||
create_cmd = (
|
||||
f"cast send {integration} "
|
||||
f"'createPool(address,address,uint256,uint256,uint256,bool)(address)' "
|
||||
f"{base_address} {quote_address} {fee_bps} {initial_price or '<initial_price_e18>'} {k_value} "
|
||||
f"{str(open_twap).lower()} --rpc-url {rpc_url} --private-key $PRIVATE_KEY"
|
||||
if integration and base_address and quote_address and rpc_url else ""
|
||||
)
|
||||
seed_cmd = (
|
||||
f"cast send {integration} "
|
||||
f"'addLiquidity(address,uint256,uint256)(uint256,uint256,uint256)' "
|
||||
f"<pool_address> {base_amount or '<base_amount_raw>'} {quote_amount or '<quote_amount_raw>'} "
|
||||
f"--rpc-url {rpc_url} --private-key $PRIVATE_KEY"
|
||||
if integration and rpc_url else ""
|
||||
)
|
||||
|
||||
rows.append({
|
||||
"chain_id": chain_id,
|
||||
"network": network,
|
||||
"pair": pair,
|
||||
"base_symbol": base_symbol,
|
||||
"quote_symbol": quote_symbol,
|
||||
"hub_stable": missing["hub_stable"],
|
||||
"rpc_env_key": rpc_key,
|
||||
"integration_env_key": integration_key,
|
||||
"base_env_key": base_env_key,
|
||||
"quote_anchor_source": "deployment-status.json",
|
||||
"rpc_url_present": bool(rpc_url),
|
||||
"integration_present": bool(integration),
|
||||
"base_token_present": bool(base_address),
|
||||
"quote_anchor_present": bool(quote_address),
|
||||
"base_address": base_address or None,
|
||||
"quote_address": quote_address or None,
|
||||
"integration_address": integration or None,
|
||||
"deployer": deployer or None,
|
||||
"live_checks_enabled": live_checks,
|
||||
"existing_pool_address": existing_pool or None,
|
||||
"initial_price_e18": initial_price,
|
||||
"price_mode": price_mode,
|
||||
"fee_bps": fee_bps,
|
||||
"k": k_value,
|
||||
"open_twap": open_twap,
|
||||
"base_amount_raw": base_amount,
|
||||
"quote_amount_raw": quote_amount,
|
||||
"mint_base_amount_raw": mint_amount,
|
||||
"wallet_base_balance_raw": base_balance or None,
|
||||
"wallet_quote_balance_raw": quote_balance or None,
|
||||
"wallet_base_allowance_raw": base_allowance or None,
|
||||
"wallet_quote_allowance_raw": quote_allowance or None,
|
||||
"mintable_base": mintable_base,
|
||||
"base_supply_mode": base_supply_mode,
|
||||
"ready_to_create": create_ready,
|
||||
"ready_to_create_with_bootstrap_price": create_ready_with_bootstrap_price,
|
||||
"ready_to_seed": seed_ready and "insufficient_base_balance" not in blockers and "insufficient_quote_balance" not in blockers,
|
||||
"blockers": sorted(set(blockers)),
|
||||
"create_command": create_cmd,
|
||||
"seed_command": seed_cmd,
|
||||
"next_step": (
|
||||
"deploy_or_seed_now" if seed_ready and "insufficient_base_balance" not in blockers and "insufficient_quote_balance" not in blockers
|
||||
else "operator_review_bootstrap_price" if create_ready_with_bootstrap_price
|
||||
else "resolve_blockers"
|
||||
),
|
||||
})
|
||||
|
||||
summary = {
|
||||
"planned_missing_rows": len(rows),
|
||||
"ready_to_create_strict": sum(1 for row in rows if row["ready_to_create"]),
|
||||
"ready_to_create_with_bootstrap_price": sum(1 for row in rows if row["ready_to_create_with_bootstrap_price"]),
|
||||
"ready_to_seed": sum(1 for row in rows if row["ready_to_seed"]),
|
||||
"existing_pool_rows_found_onchain": sum(1 for row in rows if row["existing_pool_address"] and row["existing_pool_address"] != "0x0000000000000000000000000000000000000000"),
|
||||
"counts_by_network": dict(sorted(Counter(row["network"] for row in rows).items())),
|
||||
"top_blockers": dict(sorted(Counter(blocker for row in rows for blocker in row["blockers"]).items(), key=lambda item: (-item[1], item[0]))[:20]),
|
||||
}
|
||||
|
||||
result = {
|
||||
"generated_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
|
||||
"description": "Universal GRU v2 Wave 1 public pool deployment plan derived from the missing-pool gap report, env, deployment-status, and seed policy.",
|
||||
"sources": [
|
||||
"reports/extraction/gru-v2-wave1-public-gap-report-latest.json",
|
||||
"cross-chain-pmm-lps/config/deployment-status.json",
|
||||
"config/extraction/gru-v2-wave1-public-seed-policy.json",
|
||||
"repo env via scripts/lib/load-project-env.sh",
|
||||
"optional live RPC checks via GRU_WAVE1_PLAN_LIVE_CHECKS=1",
|
||||
],
|
||||
"summary": summary,
|
||||
"rows": sorted(rows, key=lambda item: (item["chain_id"], item["pair"])),
|
||||
}
|
||||
|
||||
output_path.write_text(json.dumps(result, indent=2) + "\n")
|
||||
print(json.dumps(summary, indent=2))
|
||||
PY
|
||||
|
||||
echo "Wrote ${OUTPUT_PATH}"
|
||||
scripts/deployment/run-gru-v2-full-deployment.sh (new executable file, 152 lines)
@@ -0,0 +1,152 @@
|
||||
#!/usr/bin/env bash
#
# Orchestrate the repo-backed portions of the GRU v2 full deployment plan.
# This script is intentionally honest about scope:
# - Chain 138 has real PMM desired-state sync and verification paths.
# - ALL Mainnet 651940 full cA* mesh is not fully deployable from this repo today.
# - Multi-protocol completion on 138/651940 is still partially inventory-only.
#
# Usage:
#   bash scripts/deployment/run-gru-v2-full-deployment.sh [--dry-run] [--apply-chain138]
#
# Exit codes:
#   0 = all repo-backed steps passed and no known implementation blockers remain
#   1 = one or more repo-backed steps failed
#   2 = repo-backed steps passed but external/not-yet-implemented blockers remain
#
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
|
||||
DRY_RUN=1
|
||||
APPLY_CHAIN138=0
|
||||
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
--dry-run) DRY_RUN=1 ;;
|
||||
--apply-chain138) DRY_RUN=0; APPLY_CHAIN138=1 ;;
|
||||
*)
|
||||
echo "Unknown argument: $arg" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
run_cmd() {
|
||||
if (( DRY_RUN )); then
|
||||
echo "[DRY-RUN] $*"
|
||||
else
|
||||
"$@"
|
||||
fi
|
||||
}
|
||||
|
||||
say() {
|
||||
printf '\n== %s ==\n' "$1"
|
||||
}
|
||||
|
||||
BLOCKERS=0
|
||||
FAILURES=0
|
||||
|
||||
say "GRU v2 full deployment"
|
||||
echo "projectRoot=$PROJECT_ROOT"
|
||||
echo "dryRun=$DRY_RUN"
|
||||
echo "applyChain138=$APPLY_CHAIN138"
|
||||
|
||||
say "Validate planning artifacts"
|
||||
if ! run_cmd python3 "$PROJECT_ROOT/scripts/validation/validate-gru-v2-full-mesh-artifacts.py"; then
|
||||
echo "validation failed: GRU mesh artifacts" >&2
|
||||
FAILURES=1
|
||||
fi
|
||||
|
||||
say "Reconcile current live status"
|
||||
if ! run_cmd python3 "$PROJECT_ROOT/scripts/verify/reconcile-gru-v2-full-mesh-status.py"; then
|
||||
echo "reconcile failed: GRU mesh live status" >&2
|
||||
FAILURES=1
|
||||
fi
|
||||
|
||||
say "Chain 138 repo-backed deployment path"
|
||||
if (( APPLY_CHAIN138 )); then
|
||||
if ! run_cmd bash "$PROJECT_ROOT/scripts/deployment/run-all-next-steps-chain138.sh" --mesh-only --skip-register-gru; then
|
||||
echo "chain 138 deployment path failed" >&2
|
||||
FAILURES=1
|
||||
fi
|
||||
if ! run_cmd bash "$PROJECT_ROOT/scripts/deployment/deploy-chain138-pilot-protocol-venues.sh" --apply; then
|
||||
echo "chain 138 protocol venue deployment path failed" >&2
|
||||
FAILURES=1
|
||||
fi
|
||||
else
|
||||
run_cmd bash "$PROJECT_ROOT/scripts/deployment/run-all-next-steps-chain138.sh" --dry-run --mesh-only --skip-register-gru
|
||||
run_cmd bash "$PROJECT_ROOT/scripts/deployment/deploy-chain138-pilot-protocol-venues.sh" --dry-run
|
||||
fi
|
||||
|
||||
say "Chain 138 readiness"
|
||||
if ! run_cmd bash "$PROJECT_ROOT/scripts/verify/check-gru-v2-chain138-readiness.sh"; then
|
||||
echo "chain 138 readiness failed" >&2
|
||||
FAILURES=1
|
||||
fi
|
||||
|
||||
say "Chain 138 remaining protocol surface"
|
||||
if [[ -x "$PROJECT_ROOT/scripts/deployment/deploy-chain138-aave-v3-execution-stack.sh" ]]; then
|
||||
run_cmd bash "$PROJECT_ROOT/scripts/deployment/deploy-chain138-aave-v3-execution-stack.sh" --dry-run || BLOCKERS=1
|
||||
else
|
||||
echo "No Chain 138 Aave execution deployer was found."
|
||||
BLOCKERS=1
|
||||
fi
|
||||
|
||||
if [[ -x "$PROJECT_ROOT/scripts/deployment/deploy-chain138-aave-quote-push-receiver.sh" ]]; then
|
||||
run_cmd bash "$PROJECT_ROOT/scripts/deployment/deploy-chain138-aave-quote-push-receiver.sh" --dry-run || BLOCKERS=1
|
||||
else
|
||||
echo "No Chain 138 Aave quote-push receiver deployer was found."
|
||||
BLOCKERS=1
|
||||
fi
|
||||
|
||||
run_cmd bash "$PROJECT_ROOT/scripts/verify/check-chain138-remaining-protocol-env.sh" || BLOCKERS=1
|
||||
|
||||
say "ALL Mainnet 651940 implementation gate"
|
||||
if [[ -x "$PROJECT_ROOT/scripts/deployment/deploy-allmainnet-ca-tokens.sh" ]]; then
|
||||
run_cmd bash "$PROJECT_ROOT/scripts/deployment/deploy-allmainnet-ca-tokens.sh" --dry-run
|
||||
else
|
||||
echo "No repo-backed full cA* contract deployer was found for 651940."
|
||||
BLOCKERS=1
|
||||
fi
|
||||
|
||||
if [[ -x "$PROJECT_ROOT/scripts/deployment/sync-allmainnet-pmm-pools-from-json.sh" ]]; then
|
||||
run_cmd bash "$PROJECT_ROOT/scripts/deployment/sync-allmainnet-pmm-pools-from-json.sh"
|
||||
else
|
||||
echo "No repo-backed full 651940 PMM mesh deployer was found."
|
||||
BLOCKERS=1
|
||||
fi
|
||||
|
||||
echo "651940 still requires live cA* addresses, DODO integration/provider addresses, and routeable liquidity to finish."
|
||||
BLOCKERS=1
|
||||
|
||||
say "Protocol completion gate"
|
||||
echo "DODO on Chain 138 is script-backed."
|
||||
echo "Full Uniswap v2/v3, SushiSwap, Curve, Balancer, 1Inch, Aave, GMX, and dYdX completion on 138/651940 is not fully deployer-backed in this repo."
|
||||
echo "Marking protocol completion as externally blocked until venue-specific deploy/integration scripts exist."
|
||||
BLOCKERS=1
|
||||
|
||||
if ! run_cmd bash "$PROJECT_ROOT/scripts/verify/check-gru-v2-core-protocol-blockers.sh"; then
|
||||
BLOCKERS=1
|
||||
fi
|
||||
run_cmd bash "$PROJECT_ROOT/scripts/verify/check-allmainnet-protocol-env.sh" || BLOCKERS=1
|
||||
|
||||
say "Implementation verifier"
|
||||
if ! run_cmd python3 "$PROJECT_ROOT/scripts/verify/check-gru-v2-full-deployment-implementation.py"; then
|
||||
BLOCKERS=1
|
||||
fi
|
||||
|
||||
say "Summary"
|
||||
if (( FAILURES )); then
|
||||
echo "result=failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if (( BLOCKERS )); then
|
||||
echo "result=partial"
|
||||
echo "note=repo-backed deployment steps completed, but external/not-yet-implemented blockers remain"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
echo "result=complete"
|
||||
scripts/deployment/run-gru-v2-mainnet-funding.sh (new executable file, 105 lines)
@@ -0,0 +1,105 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
|
||||
source "${REPO_ROOT}/scripts/lib/load-project-env.sh" >/dev/null 2>&1 || true
|
||||
source "${REPO_ROOT}/smom-dbis-138/scripts/load-env.sh" >/dev/null 2>&1 || true
|
||||
|
||||
TARGET_USD="${TARGET_USD:-100000}"
|
||||
EXECUTE=0
|
||||
|
||||
usage() {
  cat <<'EOF'
Usage:
  bash scripts/deployment/run-gru-v2-mainnet-funding.sh [--target-usd=100000] [--execute]

Stages:
  1. Ensure reports are current
  2. Print / optionally apply GRU V2 Mainnet bridge parity
  3. Mint required cUSDT_V2 / cUSDC_V2 on Chain 138
  4. Bridge the required amounts to Mainnet cW mirrors
  5. Apply full-target funding to cwusdt-usdt and cwusdc-usdc
EOF
}

for arg in "$@"; do
|
||||
case "$arg" in
|
||||
--target-usd=*) TARGET_USD="${arg#*=}" ;;
|
||||
--execute) EXECUTE=1 ;;
|
||||
--help|-h) usage; exit 0 ;;
|
||||
*) echo "[fail] unknown arg: $arg" >&2; usage >&2; exit 2 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
command -v python3 >/dev/null 2>&1 || { echo "[fail] missing required command: python3" >&2; exit 1; }
|
||||
command -v cast >/dev/null 2>&1 || { echo "[fail] missing required command: cast" >&2; exit 1; }
|
||||
|
||||
bash "${REPO_ROOT}/scripts/verify/build-gru-v2-mainnet-bridge-parity.sh" >/dev/null
|
||||
bash "${REPO_ROOT}/scripts/verify/build-mainnet-direct-exit-funding-plan.sh" >/dev/null
|
||||
bash "${REPO_ROOT}/scripts/verify/build-gru-v2-mainnet-funding-plan.sh" >/dev/null
|
||||
|
||||
TARGET_USD="$TARGET_USD" REPO_ROOT="$REPO_ROOT" python3 - <<'PY'
|
||||
import json, os
|
||||
from pathlib import Path
|
||||
root = Path(os.environ["REPO_ROOT"])
|
||||
target_usd = int(os.environ["TARGET_USD"])
|
||||
report = json.loads((root / "reports/extraction/gru-v2-mainnet-funding-plan-latest.json").read_text())
|
||||
lane = report.get("parity_state", {}).get("lane_policy", {})
|
||||
print("lanePolicy.v2CutoverActive="+str(lane.get("v2_cutover_active")))
|
||||
print("lanePolicy.v1DisplacedAssets="+",".join(lane.get("v1_displaced_assets", [])))
|
||||
for row in report["assets"]:
|
||||
print("asset="+row["symbol"])
|
||||
print("targetUsd="+str(row["target_exit_usd"]))
|
||||
print("mintNeededRaw="+row["mint_needed_raw"])
|
||||
print("bridgeRaw="+row["bridge_amount_raw"])
|
||||
print("pair="+row["funding_pair"])
|
||||
print("quoteSideReadyNow="+str(row.get("wallet_can_fund_quote_side_now")))
|
||||
print("fundPairCommand="+row["fund_pool_command"])
|
||||
print("---")
|
||||
PY
|
||||
|
||||
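# run_stage always echoes the command it would run; it only executes the command
# when --execute was passed.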
run_stage() {
  local cmd="$1"
  echo "$cmd"
  if (( EXECUTE == 1 )); then
    bash -lc "$cmd"
  fi
}

run_stage "bash ${REPO_ROOT}/scripts/deployment/configure-gru-v2-mainnet-bridge-parity.sh $([[ $EXECUTE -eq 1 ]] && echo --execute)"
|
||||
|
||||
readarray -t ASSET_LINES < <(
|
||||
TARGET_USD="$TARGET_USD" REPO_ROOT="$REPO_ROOT" python3 - <<'PY'
|
||||
import json, os
|
||||
from pathlib import Path
|
||||
root = Path(os.environ["REPO_ROOT"])
|
||||
target_usd = int(os.environ["TARGET_USD"])
|
||||
report = json.loads((root / "reports/extraction/gru-v2-mainnet-funding-plan-latest.json").read_text())
|
||||
for row in report["assets"]:
|
||||
print("|".join([
|
||||
row["symbol"],
|
||||
row["canonical_token"],
|
||||
row["mirrored_token"],
|
||||
row["mint_needed_raw"],
|
||||
row["bridge_amount_raw"],
|
||||
row["funding_pair"],
|
||||
]))
|
||||
PY
|
||||
)
|
||||
|
||||
for entry in "${ASSET_LINES[@]}"; do
|
||||
IFS='|' read -r SYMBOL TOKEN MIRRORED MINT_NEEDED BRIDGE_RAW FUND_PAIR <<<"$entry"
|
||||
if [[ "$MINT_NEEDED" != "0" ]]; then
|
||||
run_stage "cast send ${TOKEN} 'mint(address,uint256)' ${CANONICAL_WALLET:-0x4A666F96fC8764181194447A7dFdb7d471b301C8} ${MINT_NEEDED} --rpc-url \"\$RPC_URL_138\" --private-key \"\$PRIVATE_KEY\" --legacy"
|
||||
fi
|
||||
if [[ "$BRIDGE_RAW" != "0" ]]; then
|
||||
run_stage "bash ${REPO_ROOT}/scripts/bridge/bridge-canonical-token-to-mainnet-cw.sh --label ${SYMBOL} --canonical-token ${TOKEN} --mirrored-token ${MIRRORED} --raw-amount ${BRIDGE_RAW} --recipient ${CANONICAL_WALLET:-0x4A666F96fC8764181194447A7dFdb7d471b301C8} --approve $([[ $EXECUTE -eq 1 ]] && echo --execute)"
|
||||
fi
|
||||
run_stage "bash ${REPO_ROOT}/scripts/deployment/apply-mainnet-direct-exit-funding.sh --pair=${FUND_PAIR} --mode=full-target --target-usd=${TARGET_USD} $([[ $EXECUTE -eq 1 ]] && echo --execute)"
|
||||
done
|
||||
|
||||
if (( EXECUTE == 0 )); then
|
||||
echo "Dry-run only. Re-run with --execute to broadcast."
|
||||
echo "Note: if quoteSideReadyNow=false in the printed plan, the pool-funding stage is still capital-blocked even though the V2 bridge lane is ready."
|
||||
fi
|
||||
scripts/deployment/run-gru-v2-wave1-public-pools.sh (new executable file, 360 lines)
@@ -0,0 +1,360 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
|
||||
PLAN_PATH="${ROOT_DIR}/reports/extraction/gru-v2-wave1-public-deploy-plan-latest.json"
|
||||
OUTPUT_PATH="${ROOT_DIR}/reports/extraction/gru-v2-wave1-public-deploy-run-latest.json"
|
||||
|
||||
source "${ROOT_DIR}/scripts/lib/load-project-env.sh" >/dev/null 2>&1 || true
|
||||
if [[ -f "${ROOT_DIR}/smom-dbis-138/scripts/lib/deployment/dotenv.sh" ]]; then
|
||||
# shellcheck disable=SC1090
|
||||
source "${ROOT_DIR}/smom-dbis-138/scripts/lib/deployment/dotenv.sh" >/dev/null 2>&1 || true
|
||||
load_deployment_env --repo-root "${ROOT_DIR}/smom-dbis-138" >/dev/null 2>&1 || true
|
||||
export PROJECT_ROOT="${ROOT_DIR}"
|
||||
fi
|
||||
|
||||
require_cmd() {
|
||||
command -v "$1" >/dev/null 2>&1 || {
|
||||
echo "[fail] missing required command: $1" >&2
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
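# parse_tx_hash: pulls the transaction hash out of `cast send` output, accepting
# either a bare 0x-prefixed 64-hex-character line or a "transactionHash 0x..."
# field, and keeps the last match.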
parse_tx_hash() {
  local output="$1"
  local tx_hash
  tx_hash="$(printf '%s\n' "$output" | grep -E '^0x[0-9a-fA-F]{64}$' | tail -n1 || true)"
  if [[ -z "$tx_hash" ]]; then
    tx_hash="$(printf '%s\n' "$output" | grep -E '^transactionHash[[:space:]]+0x[0-9a-fA-F]{64}$' | awk '{print $2}' | tail -n1 || true)"
  fi
  printf '%s\n' "$tx_hash"
}

require_cmd python3
|
||||
|
||||
CHAIN_ID_FILTER=""
|
||||
PAIR_FILTER=""
|
||||
MAX_POOLS=0
|
||||
EXECUTE=0
|
||||
ONLY_READY=0
|
||||
SKIP_SEED=0
|
||||
ALLOW_BOOTSTRAP_PRICES=0
|
||||
BASE_AMOUNT_OVERRIDE=""
|
||||
QUOTE_AMOUNT_OVERRIDE=""
|
||||
MINT_BASE_AMOUNT_OVERRIDE=""
|
||||
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
--chain-id=*) CHAIN_ID_FILTER="${arg#*=}" ;;
|
||||
--pair=*) PAIR_FILTER="${arg#*=}" ;;
|
||||
--max-pools=*) MAX_POOLS="${arg#*=}" ;;
|
||||
--execute) EXECUTE=1 ;;
|
||||
--only-ready) ONLY_READY=1 ;;
|
||||
--skip-seed) SKIP_SEED=1 ;;
|
||||
--allow-bootstrap-prices) ALLOW_BOOTSTRAP_PRICES=1 ;;
|
||||
--base-amount=*) BASE_AMOUNT_OVERRIDE="${arg#*=}" ;;
|
||||
--quote-amount=*) QUOTE_AMOUNT_OVERRIDE="${arg#*=}" ;;
|
||||
--mint-base-amount=*) MINT_BASE_AMOUNT_OVERRIDE="${arg#*=}" ;;
|
||||
*)
|
||||
echo "[fail] unknown arg: $arg" >&2
|
||||
exit 2
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if (( EXECUTE == 1 )); then
|
||||
export GRU_WAVE1_PLAN_LIVE_CHECKS="${GRU_WAVE1_PLAN_LIVE_CHECKS:-1}"
|
||||
fi
|
||||
|
||||
bash "${ROOT_DIR}/scripts/deployment/plan-gru-v2-wave1-public-pools.sh" >/dev/null
|
||||
|
||||
mkdir -p "$(dirname "$OUTPUT_PATH")"
|
||||
|
||||
python3 - <<'PY' "$PLAN_PATH" "$OUTPUT_PATH" "$CHAIN_ID_FILTER" "$PAIR_FILTER" "$MAX_POOLS" "$EXECUTE" "$ONLY_READY" "$SKIP_SEED" "$ALLOW_BOOTSTRAP_PRICES"
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
plan_path = Path(sys.argv[1])
|
||||
output_path = Path(sys.argv[2])
|
||||
chain_filter = sys.argv[3]
|
||||
pair_filter = sys.argv[4].lower()
|
||||
max_pools = int(sys.argv[5])
|
||||
execute = sys.argv[6] == "1"
|
||||
only_ready = sys.argv[7] == "1"
|
||||
skip_seed = sys.argv[8] == "1"
|
||||
allow_bootstrap_prices = sys.argv[9] == "1"
|
||||
|
||||
plan = json.loads(plan_path.read_text())
|
||||
rows = plan["rows"]
|
||||
|
||||
selected = []
|
||||
for row in rows:
|
||||
if chain_filter and str(row["chain_id"]) != chain_filter:
|
||||
continue
|
||||
if pair_filter and row["pair"].lower() != pair_filter:
|
||||
continue
|
||||
if only_ready and not row["ready_to_create_with_bootstrap_price"]:
|
||||
continue
|
||||
selected.append(row)
|
||||
if max_pools and len(selected) >= max_pools:
|
||||
break
|
||||
|
||||
result_rows = []
|
||||
for row in selected:
|
||||
out = {
|
||||
"chain_id": row["chain_id"],
|
||||
"network": row["network"],
|
||||
"pair": row["pair"],
|
||||
"mode": "execute" if execute else "dry_run",
|
||||
"create_planned": row["ready_to_create_with_bootstrap_price"],
|
||||
"seed_planned": row["ready_to_seed"] and not skip_seed,
|
||||
"price_mode": row["price_mode"],
|
||||
"blockers": list(row["blockers"]),
|
||||
"base_supply_mode": row.get("base_supply_mode"),
|
||||
"mintable_base": row.get("mintable_base"),
|
||||
"wallet_base_balance_raw": row.get("wallet_base_balance_raw"),
|
||||
"wallet_quote_balance_raw": row.get("wallet_quote_balance_raw"),
|
||||
"create_tx": None,
|
||||
"seed_tx": None,
|
||||
"pool_address_before": row["existing_pool_address"],
|
||||
"pool_address_after": row["existing_pool_address"],
|
||||
"status": "planned",
|
||||
}
|
||||
if row["price_mode"] == "bootstrap_reference" and not allow_bootstrap_prices:
|
||||
out["status"] = "blocked_bootstrap_price_guard"
|
||||
result_rows.append(out)
|
||||
continue
|
||||
if not execute:
|
||||
result_rows.append(out)
|
||||
continue
|
||||
out["status"] = "execute_requires_shell_runner"
|
||||
result_rows.append(out)
|
||||
|
||||
result = {
|
||||
"generated_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
|
||||
"description": "Universal GRU v2 Wave 1 pool operator run record. Execute mode is implemented in the surrounding shell runner.",
|
||||
"selected_count": len(selected),
|
||||
"rows": result_rows,
|
||||
}
|
||||
|
||||
output_path.write_text(json.dumps(result, indent=2) + "\n")
|
||||
print(json.dumps({"selected_count": len(selected)}, indent=2))
|
||||
PY
|
||||
|
||||
if (( EXECUTE == 0 )); then
|
||||
echo "Wrote ${OUTPUT_PATH}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
require_cmd cast
|
||||
|
||||
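# Each selected plan row is serialized by Python as a single base64-encoded JSON
# line so it survives word splitting; the execute loop below decodes each line
# back into uppercase KEY=value shell assignments via eval.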
mapfile -t RUN_ROWS < <(
|
||||
python3 - <<'PY' "$PLAN_PATH" "$CHAIN_ID_FILTER" "$PAIR_FILTER" "$MAX_POOLS" "$ONLY_READY" "$ALLOW_BOOTSTRAP_PRICES" "$BASE_AMOUNT_OVERRIDE" "$QUOTE_AMOUNT_OVERRIDE" "$MINT_BASE_AMOUNT_OVERRIDE"
|
||||
import base64, json, os, sys
|
||||
from pathlib import Path
|
||||
|
||||
plan = json.loads(Path(sys.argv[1]).read_text())
|
||||
chain_filter = sys.argv[2]
|
||||
pair_filter = sys.argv[3].lower()
|
||||
max_pools = int(sys.argv[4])
|
||||
only_ready = sys.argv[5] == "1"
|
||||
allow_bootstrap_prices = sys.argv[6] == "1"
|
||||
base_override = sys.argv[7]
|
||||
quote_override = sys.argv[8]
|
||||
mint_override = sys.argv[9]
|
||||
|
||||
rows = []
|
||||
for row in plan["rows"]:
|
||||
if chain_filter and str(row["chain_id"]) != chain_filter:
|
||||
continue
|
||||
if pair_filter and row["pair"].lower() != pair_filter:
|
||||
continue
|
||||
if only_ready and not row["ready_to_create_with_bootstrap_price"]:
|
||||
continue
|
||||
if row["price_mode"] == "bootstrap_reference" and not allow_bootstrap_prices:
|
||||
continue
|
||||
rows.append(row)
|
||||
if max_pools and len(rows) >= max_pools:
|
||||
break
|
||||
|
||||
for row in rows:
|
||||
payload = {
|
||||
"chain_id": row["chain_id"],
|
||||
"network": row["network"],
|
||||
"pair": row["pair"],
|
||||
"integration": row["integration_address"] or "",
|
||||
"base_addr": row["base_address"] or "",
|
||||
"quote_addr": row["quote_address"] or "",
|
||||
"existing_pool": row["existing_pool_address"] or "",
|
||||
"initial_price": row["initial_price_e18"] or "",
|
||||
"fee_bps": row["fee_bps"] or "",
|
||||
"k_value": row["k"] or "",
|
||||
"open_twap": "true" if row["open_twap"] else "false",
|
||||
"base_amount": base_override or row["base_amount_raw"] or "",
|
||||
"quote_amount": quote_override or row["quote_amount_raw"] or "",
|
||||
"mint_amount": mint_override or row["mint_base_amount_raw"] or "",
|
||||
"rpc_env_key": row["rpc_env_key"] or "",
|
||||
"rpc_url": os.environ.get(row["rpc_env_key"] or "", "") if row.get("rpc_url_present") else "",
|
||||
"ready_to_seed": "true" if row.get("ready_to_seed") else "false",
|
||||
"base_supply_mode": row.get("base_supply_mode") or "",
|
||||
"mintable_base": "true" if row.get("mintable_base") else "false",
|
||||
"wallet_base_balance_raw": row.get("wallet_base_balance_raw") or "",
|
||||
"wallet_quote_balance_raw": row.get("wallet_quote_balance_raw") or "",
|
||||
"blockers_json": json.dumps(row.get("blockers", []), separators=(",", ":")),
|
||||
}
|
||||
print(base64.b64encode(json.dumps(payload).encode()).decode())
|
||||
PY
|
||||
)
|
||||
|
||||
DEPLOYER="$(cast wallet address --private-key "${PRIVATE_KEY:-}")"
|
||||
|
||||
RESULT_JSON="$(
|
||||
python3 - <<'PY'
|
||||
import json
|
||||
print(json.dumps({"generated_at": None, "description": "Universal GRU v2 Wave 1 pool operator live run record.", "rows": []}))
|
||||
PY
|
||||
)"
|
||||
|
||||
for encoded in "${RUN_ROWS[@]}"; do
|
||||
eval "$(
|
||||
python3 - <<'PY' "$encoded"
|
||||
import base64, json, shlex, sys
|
||||
row = json.loads(base64.b64decode(sys.argv[1]).decode())
|
||||
for key, value in row.items():
|
||||
print(f"{key.upper()}={shlex.quote(str(value))}")
|
||||
PY
|
||||
)"
|
||||
STATUS="executed"
|
||||
CREATE_TX=""
|
||||
SEED_TX=""
|
||||
POOL_ADDRESS="$EXISTING_POOL"
|
||||
NOTES=()
|
||||
SEED_READY=0
|
||||
MINTABLE_BASE_READY=0
|
||||
HAS_INSUFFICIENT_BASE=0
|
||||
HAS_INSUFFICIENT_QUOTE=0
|
||||
|
||||
if [[ "${READY_TO_SEED:-false}" == "true" ]]; then
|
||||
SEED_READY=1
|
||||
fi
|
||||
if [[ "${MINTABLE_BASE:-false}" == "true" ]]; then
|
||||
MINTABLE_BASE_READY=1
|
||||
fi
|
||||
if [[ "${BLOCKERS_JSON:-[]}" == *'"insufficient_base_balance"'* ]]; then
|
||||
HAS_INSUFFICIENT_BASE=1
|
||||
fi
|
||||
if [[ "${BLOCKERS_JSON:-[]}" == *'"insufficient_quote_balance"'* ]]; then
|
||||
HAS_INSUFFICIENT_QUOTE=1
|
||||
fi
|
||||
|
||||
if [[ -z "$RPC_URL" || -z "$INTEGRATION" || -z "$BASE_ADDR" || -z "$QUOTE_ADDR" ]]; then
|
||||
STATUS="blocked_missing_runtime_inputs"
|
||||
else
|
||||
if [[ -z "$POOL_ADDRESS" || "$POOL_ADDRESS" == "0x0000000000000000000000000000000000000000" ]]; then
|
||||
create_output="$(
|
||||
cast send "$INTEGRATION" \
|
||||
'createPool(address,address,uint256,uint256,uint256,bool)(address)' \
|
||||
"$BASE_ADDR" "$QUOTE_ADDR" "$FEE_BPS" "$INITIAL_PRICE" "$K_VALUE" "$OPEN_TWAP" \
|
||||
--rpc-url "$RPC_URL" \
|
||||
--private-key "${PRIVATE_KEY:-}"
|
||||
)"
|
||||
CREATE_TX="$(parse_tx_hash "$create_output")"
|
||||
POOL_ADDRESS="$(cast call "$INTEGRATION" 'pools(address,address)(address)' "$BASE_ADDR" "$QUOTE_ADDR" --rpc-url "$RPC_URL" | awk '{print $1}')"
|
||||
fi
|
||||
|
||||
if (( SKIP_SEED == 0 )) && [[ -n "$POOL_ADDRESS" && "$POOL_ADDRESS" != "0x0000000000000000000000000000000000000000" && -n "$BASE_AMOUNT" && -n "$QUOTE_AMOUNT" ]]; then
|
||||
if (( HAS_INSUFFICIENT_BASE == 1 )); then
|
||||
STATUS="blocked_seed_base_supply"
|
||||
NOTES+=("seed_skipped_insufficient_base_supply")
|
||||
elif (( HAS_INSUFFICIENT_QUOTE == 1 )); then
|
||||
STATUS="blocked_seed_quote_supply"
|
||||
NOTES+=("seed_skipped_insufficient_quote_supply")
|
||||
elif (( SEED_READY == 0 )); then
|
||||
STATUS="blocked_seed_readiness"
|
||||
NOTES+=("seed_skipped_not_ready")
|
||||
else
|
||||
BASE_BAL="${WALLET_BASE_BALANCE_RAW:-}"
|
||||
QUOTE_BAL="${WALLET_QUOTE_BALANCE_RAW:-}"
|
||||
if [[ -z "$BASE_BAL" ]]; then
|
||||
BASE_BAL="$(cast call "$BASE_ADDR" 'balanceOf(address)(uint256)' "$DEPLOYER" --rpc-url "$RPC_URL" | awk '{print $1}')"
|
||||
fi
|
||||
if [[ -z "$QUOTE_BAL" ]]; then
|
||||
QUOTE_BAL="$(cast call "$QUOTE_ADDR" 'balanceOf(address)(uint256)' "$DEPLOYER" --rpc-url "$RPC_URL" | awk '{print $1}')"
|
||||
fi
|
||||
if (( BASE_BAL < BASE_AMOUNT )); then
|
||||
if (( MINTABLE_BASE_READY == 1 )) && [[ -n "$MINT_AMOUNT" && "$MINT_AMOUNT" != "0" ]]; then
|
||||
cast send "$BASE_ADDR" 'mint(address,uint256)' "$DEPLOYER" "$MINT_AMOUNT" --rpc-url "$RPC_URL" --private-key "${PRIVATE_KEY:-}" >/dev/null
|
||||
NOTES+=("minted_base")
|
||||
BASE_BAL="$(cast call "$BASE_ADDR" 'balanceOf(address)(uint256)' "$DEPLOYER" --rpc-url "$RPC_URL" | awk '{print $1}')"
|
||||
fi
|
||||
fi
|
||||
if (( BASE_BAL < BASE_AMOUNT )); then
|
||||
STATUS="blocked_seed_base_supply"
|
||||
NOTES+=("seed_skipped_postcheck_base_short")
|
||||
elif (( QUOTE_BAL < QUOTE_AMOUNT )); then
|
||||
STATUS="blocked_seed_quote_supply"
|
||||
NOTES+=("seed_skipped_postcheck_quote_short")
|
||||
else
|
||||
cast send "$BASE_ADDR" 'approve(address,uint256)(bool)' "$INTEGRATION" "$BASE_AMOUNT" --rpc-url "$RPC_URL" --private-key "${PRIVATE_KEY:-}" >/dev/null
|
||||
cast send "$QUOTE_ADDR" 'approve(address,uint256)(bool)' "$INTEGRATION" "$QUOTE_AMOUNT" --rpc-url "$RPC_URL" --private-key "${PRIVATE_KEY:-}" >/dev/null
|
||||
seed_output="$(
|
||||
cast send "$INTEGRATION" \
|
||||
'addLiquidity(address,uint256,uint256)(uint256,uint256,uint256)' \
|
||||
"$POOL_ADDRESS" "$BASE_AMOUNT" "$QUOTE_AMOUNT" \
|
||||
--rpc-url "$RPC_URL" \
|
||||
--private-key "${PRIVATE_KEY:-}"
|
||||
)"
|
||||
SEED_TX="$(parse_tx_hash "$seed_output")"
|
||||
if [[ -n "$CREATE_TX" ]]; then
|
||||
STATUS="executed_create_and_seed"
|
||||
else
|
||||
STATUS="executed_seed_only"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
elif [[ -n "$CREATE_TX" ]]; then
|
||||
STATUS="executed_create_only"
|
||||
fi
|
||||
fi
|
||||
|
||||
RESULT_JSON="$(
|
||||
python3 - <<'PY' "$RESULT_JSON" "$CHAIN_ID" "$NETWORK" "$PAIR" "$STATUS" "$CREATE_TX" "$SEED_TX" "$POOL_ADDRESS" "$EXISTING_POOL" "$(printf '%s\n' "${NOTES[*]}")" "${BASE_SUPPLY_MODE:-}" "${MINTABLE_BASE:-false}" "${WALLET_BASE_BALANCE_RAW:-}" "${WALLET_QUOTE_BALANCE_RAW:-}" "${BLOCKERS_JSON:-[]}"
|
||||
import json, sys
|
||||
doc = json.loads(sys.argv[1])
|
||||
doc["rows"].append({
|
||||
"chain_id": int(sys.argv[2]),
|
||||
"network": sys.argv[3],
|
||||
"pair": sys.argv[4],
|
||||
"status": sys.argv[5],
|
||||
"create_tx": sys.argv[6] or None,
|
||||
"seed_tx": sys.argv[7] or None,
|
||||
"pool_address_before": sys.argv[9] or None,
|
||||
"pool_address_after": sys.argv[8] or None,
|
||||
"notes": [x for x in sys.argv[10].split() if x],
|
||||
"base_supply_mode": sys.argv[11] or None,
|
||||
"mintable_base": sys.argv[12] == "true",
|
||||
"wallet_base_balance_raw": sys.argv[13] or None,
|
||||
"wallet_quote_balance_raw": sys.argv[14] or None,
|
||||
"blockers": json.loads(sys.argv[15] or "[]"),
|
||||
})
|
||||
print(json.dumps(doc))
|
||||
PY
|
||||
)"
|
||||
done
|
||||
|
||||
python3 - <<'PY' "$RESULT_JSON" "$OUTPUT_PATH"
|
||||
import json, sys
|
||||
from datetime import datetime, timezone
|
||||
doc = json.loads(sys.argv[1])
|
||||
doc["generated_at"] = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
|
||||
with open(sys.argv[2], "w") as fh:
|
||||
json.dump(doc, fh, indent=2)
|
||||
fh.write("\n")
|
||||
print(json.dumps({"executed_rows": len(doc["rows"])}, indent=2))
|
||||
PY
|
||||
|
||||
echo "Wrote ${OUTPUT_PATH}"
|
||||
scripts/validation/validate-gru-v2-full-mesh-artifacts.py (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
import pathlib
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
ROOT = pathlib.Path(__file__).resolve().parents[2]
TRACKER = ROOT / "config/gru-v2-full-mesh-pool-tracker.json"
SCHEMA = ROOT / "config/gru-v2-full-mesh-pool-tracker.schema.json"
MASTER = ROOT / "config/gru-v2-full-mesh-master-matrix.json"
MASTER_DOC = ROOT / "docs/04-configuration/GRU_V2_FULL_MESH_MASTER_MATRIX.md"
CHECKLIST = ROOT / "docs/04-configuration/GRU_V2_FULL_MESH_EXECUTION_CHECKLIST.md"
PROTOCOL = ROOT / "docs/04-configuration/GRU_V2_PROTOCOL_COMPLETION_MATRIX.md"


def fail(msg: str) -> None:
    print(f"ERROR: {msg}", file=sys.stderr)
    sys.exit(1)


def load_json(path: pathlib.Path):
    try:
        return json.loads(path.read_text())
    except Exception as exc:
        fail(f"failed to parse {path}: {exc}")


def require(cond: bool, msg: str) -> None:
    if not cond:
        fail(msg)


def validate_tracker(tracker: dict) -> None:
    require(re.fullmatch(r"\d{4}-\d{2}-\d{2}", tracker.get("statusDate", "")) is not None, "tracker statusDate must be YYYY-MM-DD")
    default_fields = tracker.get("defaultFields", {})
    for key in ["status", "deployed", "seeded", "validated", "live", "mevReady"]:
        require(key in default_fields, f"defaultFields missing {key}")
    require(default_fields["status"] in {"todo", "in_progress", "blocked", "done"}, "defaultFields.status invalid")

    chain138_pairs = [row["pair"] for row in tracker["chain138"]["entries"]]
    allmain_pairs = [row["pair"] for row in tracker["allMainnet651940"]["entries"]]
    require("cUSDT V2 / cUSDC V2" in chain138_pairs, "chain138 tracker missing canonical USD hub pair")
    require("cAUSDT / cAUSDC" in allmain_pairs, "allMainnet tracker missing canonical USD hub pair")

    public_mesh = tracker["publicMesh"]
    for chain in ["1", "10", "25", "56", "100", "137", "1111", "8453", "42161", "42220", "43114"]:
        require(chain in public_mesh, f"publicMesh missing connected chain {chain}")
        entries = public_mesh[chain]["entries"]
        require("cWUSDT / USDC" in entries, f"publicMesh {chain} missing cWUSDT / USDC")
        require("cWUSDC / USDC" in entries, f"publicMesh {chain} missing cWUSDC / USDC")


def validate_master(master: dict) -> None:
    phases = {row["id"] for row in master.get("executionPhases", [])}
    require(phases == {"P0", "P1", "P2", "P3", "P4", "P5", "P6", "P7"}, "executionPhases must contain P0..P7")
    protocols = set(master.get("protocolsRequired", []))
    for protocol in ["DODO", "Uniswap v3", "Uniswap v2", "SushiSwap", "Curve", "Balancer", "1Inch", "Aave", "GMX", "dYdX"]:
        require(protocol in protocols, f"master matrix missing protocol {protocol}")


def validate_docs() -> None:
    for path in [MASTER_DOC, CHECKLIST, PROTOCOL]:
        require(path.exists(), f"missing doc {path}")
        text = path.read_text()
        require("Chain 138" in text or "138" in text, f"{path.name} does not mention Chain 138")
        require("651940" in text or "ALL Mainnet" in text, f"{path.name} does not mention ALL Mainnet")


def main() -> None:
    for path in [TRACKER, SCHEMA, MASTER, MASTER_DOC, CHECKLIST, PROTOCOL]:
        require(path.exists(), f"required artifact missing: {path}")
    tracker = load_json(TRACKER)
    _schema = load_json(SCHEMA)
    master = load_json(MASTER)
    validate_tracker(tracker)
    validate_master(master)
    validate_docs()
    print("GRU_V2_FULL_MESH_ARTIFACTS_OK")


if __name__ == "__main__":
    main()
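
Reviewer note: main() parses the schema file but the `_schema` value is never applied to the tracker, so the schema only gets a JSON-syntax check here. A minimal sketch of how it could be enforced inside this same script, assuming the optional jsonschema package were added as a dev dependency (it is not declared anywhere in this diff):

# Sketch only: enforce the schema in addition to the hand-written checks.
# Assumes `jsonschema` is installed; falls back to a soft skip otherwise.
try:
    import jsonschema
except ImportError:  # optional dependency, not declared in this repo
    jsonschema = None


def validate_against_schema(tracker: dict, schema: dict) -> None:
    if jsonschema is None:
        print("jsonschema not installed; skipping schema validation", file=sys.stderr)
        return
    try:
        jsonschema.validate(instance=tracker, schema=schema)
    except jsonschema.ValidationError as exc:
        fail(f"tracker violates {SCHEMA.name}: {exc.message}")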

scripts/verify/build-gru-v2-mainnet-bridge-parity.sh  (Executable file, 5 lines added)
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
source "$ROOT/scripts/lib/load-project-env.sh" >/dev/null 2>&1 || true
python3 "$ROOT/scripts/lib/immediate_liquidity_expansion.py" parity

scripts/verify/build-gru-v2-mainnet-funding-plan.sh  (Executable file, 5 lines added)
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
source "$ROOT/scripts/lib/load-project-env.sh" >/dev/null 2>&1 || true
python3 "$ROOT/scripts/lib/immediate_liquidity_expansion.py" v2-funding
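
Reviewer note: both wrapper scripts above delegate to scripts/lib/immediate_liquidity_expansion.py and select a mode through the first argument (parity or v2-funding). That module is not part of this diff; purely as a hedged sketch, a dispatcher of the shape these wrappers assume might look like:

# Hypothetical sketch only: the real scripts/lib/immediate_liquidity_expansion.py
# is not shown in this diff. Illustrates a subcommand dispatcher of the kind the
# two wrapper scripts assume.
import sys


def build_bridge_parity() -> int:
    # compare mainnet bridge state against the tracker and print a report
    return 0


def build_v2_funding_plan() -> int:
    # emit the GRU v2 mainnet funding plan
    return 0


COMMANDS = {"parity": build_bridge_parity, "v2-funding": build_v2_funding_plan}


def main() -> int:
    if len(sys.argv) < 2 or sys.argv[1] not in COMMANDS:
        print(f"usage: {sys.argv[0]} parity|v2-funding", file=sys.stderr)
        return 2
    return COMMANDS[sys.argv[1]]()


if __name__ == "__main__":
    raise SystemExit(main())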

scripts/verify/build-gru-v2-wave1-funding-authority-report.sh  (Normal file, 298 lines added)
@@ -0,0 +1,298 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
REPO_ROOT="$PROJECT_ROOT"
OUTPUT_PATH="${PROJECT_ROOT}/reports/extraction/gru-v2-wave1-funding-authority-report-latest.json"
PLAN_PATH="${PROJECT_ROOT}/reports/extraction/gru-v2-wave1-public-deploy-plan-latest.json"
GAP_REPORT_PATH="${PROJECT_ROOT}/reports/extraction/gru-v2-wave1-public-gap-report-latest.json"
VERIFY_MATRIX_PATH="${PROJECT_ROOT}/reports/status/contract_verification_publish_matrix.json"

source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" >/dev/null 2>&1 || true
if [[ -f "${PROJECT_ROOT}/smom-dbis-138/scripts/lib/deployment/dotenv.sh" ]]; then
  # shellcheck disable=SC1090
  source "${PROJECT_ROOT}/smom-dbis-138/scripts/lib/deployment/dotenv.sh" >/dev/null 2>&1 || true
  load_deployment_env --repo-root "${PROJECT_ROOT}/smom-dbis-138" >/dev/null 2>&1 || true
  PROJECT_ROOT="$REPO_ROOT"
  export PROJECT_ROOT
fi

mkdir -p "$(dirname "$OUTPUT_PATH")"

NEEDS_PLAN_REFRESH="$(
  python3 - <<'PY' "$PLAN_PATH" "$GAP_REPORT_PATH"
import json, sys
from pathlib import Path

plan_path = Path(sys.argv[1])
gap_report_path = Path(sys.argv[2])

if not plan_path.exists() or not gap_report_path.exists():
    print("1")
    raise SystemExit

plan = json.loads(plan_path.read_text())
gap = json.loads(gap_report_path.read_text())
rows = plan.get("rows", [])
expected = gap.get("summary", {}).get("first_tier_wave1_pools_missing")
live_rows = [row for row in rows if row.get("live_checks_enabled")]

if expected is None or len(rows) != expected or len(live_rows) != len(rows):
    print("1")
else:
    print("0")
PY
)"

if [[ "$NEEDS_PLAN_REFRESH" == "1" ]]; then
  export GRU_WAVE1_PLAN_LIVE_CHECKS="${GRU_WAVE1_PLAN_LIVE_CHECKS:-1}"
  bash "${PROJECT_ROOT}/scripts/deployment/plan-gru-v2-wave1-public-pools.sh" >/dev/null
fi

python3 - <<'PY' "$PLAN_PATH" "$GAP_REPORT_PATH" "$VERIFY_MATRIX_PATH" "$OUTPUT_PATH"
import json
import sys
from collections import Counter, defaultdict
from datetime import datetime, timezone
from pathlib import Path

plan_path = Path(sys.argv[1])
gap_report_path = Path(sys.argv[2])
verify_matrix_path = Path(sys.argv[3])
output_path = Path(sys.argv[4])

plan = json.loads(plan_path.read_text())
gap_report = json.loads(gap_report_path.read_text())
verify_matrix = json.loads(verify_matrix_path.read_text()) if verify_matrix_path.exists() else {"entries": []}

plan_rows = {
    (int(row["chain_id"]), row["pair"]): row
    for row in plan.get("rows", [])
}

by_label = {}
by_address = {}
for entry in verify_matrix.get("entries", []):
    chain_id = str(entry.get("chainId", ""))
    label = entry.get("label", "")
    address = (entry.get("address") or "").lower()
    if chain_id and label:
        by_label[(chain_id, label)] = entry
    if chain_id and address:
        by_address[(chain_id, address)] = entry


def verification_entry(chain_id: int, label: str = "", address: str = ""):
    chain_key = str(chain_id)
    if label and (chain_key, label) in by_label:
        return by_label[(chain_key, label)]
    address = (address or "").lower()
    if address and (chain_key, address) in by_address:
        return by_address[(chain_key, address)]
    return None


def verification_state_for(chain_id: int, label: str = "", address: str = "", exists_expected: bool = True):
    if not exists_expected:
        return {
            "tracked": False,
            "verification_status": "not_deployed",
            "publication_status": "not_deployed",
            "publish_surface": "",
            "explorer": "",
            "notes": "Pool or contract not yet deployed",
        }
    entry = verification_entry(chain_id, label=label, address=address)
    if not entry:
        return {
            "tracked": False,
            "verification_status": "untracked",
            "publication_status": "untracked",
            "publish_surface": "",
            "explorer": "",
            "notes": "No matching verification/publication matrix row found",
        }
    return {
        "tracked": True,
        "verification_status": entry.get("verificationStatus", "unknown"),
        "publication_status": entry.get("publicationStatus", "unknown"),
        "publish_surface": entry.get("publishSurface", ""),
        "explorer": entry.get("explorer", ""),
        "notes": entry.get("publishNotes", ""),
    }


def is_complete(status_block: dict) -> bool:
    return (
        status_block.get("tracked") is True
        and status_block.get("verification_status") == "complete"
        and status_block.get("publication_status") == "complete"
    )


rows = []
chain_summary = defaultdict(lambda: {
    "missing_pair_count": 0,
    "rows_missing_quote_side_stable": 0,
    "rows_missing_base_side_balance": 0,
    "rows_missing_base_side_mintability": 0,
    "rows_missing_integration": 0,
    "rows_missing_verification_publication": 0,
})

for gap_row in gap_report.get("missing_first_tier_wave1_pools", []):
    key = (int(gap_row["chain_id"]), gap_row["pair"])
    plan_row = plan_rows.get(key)
    if not plan_row:
        continue

    chain_id = int(plan_row["chain_id"])
    network = plan_row["network"]
    pair = plan_row["pair"]
    base_symbol = plan_row["base_symbol"]
    quote_symbol = plan_row["quote_symbol"]
    existing_pool = plan_row.get("existing_pool_address") or ""
    pool_exists = existing_pool not in ("", "0x0000000000000000000000000000000000000000", None)

    quote_balance_raw = plan_row.get("wallet_quote_balance_raw")
    quote_amount_raw = plan_row.get("quote_amount_raw")
    base_balance_raw = plan_row.get("wallet_base_balance_raw")
    base_amount_raw = plan_row.get("base_amount_raw")

    quote_has_balance = bool(quote_balance_raw) and bool(quote_amount_raw) and int(quote_balance_raw) >= int(quote_amount_raw)
    base_has_balance = bool(base_balance_raw) and bool(base_amount_raw) and int(base_balance_raw) >= int(base_amount_raw)
    base_mintable = bool(plan_row.get("mintable_base"))
    integration_present = bool(plan_row.get("integration_present"))

    base_ver = verification_state_for(chain_id, label=base_symbol, address=plan_row.get("base_address") or "", exists_expected=bool(plan_row.get("base_address")))
    quote_ver = verification_state_for(chain_id, label=quote_symbol, address=plan_row.get("quote_address") or "", exists_expected=bool(plan_row.get("quote_address")))
    pool_ver = verification_state_for(chain_id, label=pair, address=existing_pool, exists_expected=pool_exists)
    integration_ver = verification_state_for(chain_id, address=plan_row.get("integration_address") or "", exists_expected=integration_present)

    verification_publication_complete = all(
        is_complete(block) for block in [base_ver, quote_ver]
    ) and (
        is_complete(pool_ver) if pool_exists else False
    ) and (
        is_complete(integration_ver) if integration_present else False
    )

    missing = {
        "quote_side_stable": not quote_has_balance,
        "base_side_balance": not base_has_balance,
        "base_side_mintability": not base_mintable,
        "integration": not integration_present,
        "verification_publication_status": not verification_publication_complete,
    }

    missing_reasons = []
    if missing["quote_side_stable"]:
        missing_reasons.append("quote_side_stable")
    if missing["base_side_balance"]:
        missing_reasons.append("base_side_balance")
    if missing["base_side_mintability"]:
        missing_reasons.append("base_side_mintability")
    if missing["integration"]:
        missing_reasons.append("integration")
    if missing["verification_publication_status"]:
        missing_reasons.append("verification_publication_status")

    verification_publication = {
        "overall_complete": verification_publication_complete,
        "base_token": base_ver,
        "quote_token": quote_ver,
        "integration_contract": integration_ver,
        "pool_contract": pool_ver,
    }

    row = {
        "chain_id": chain_id,
        "network": network,
        "pair": pair,
        "hub_stable": gap_row.get("hub_stable"),
        "missing": missing,
        "missing_reasons": missing_reasons,
        "quote_side_stable": {
            "quote_symbol": quote_symbol,
            "quote_address": plan_row.get("quote_address"),
            "required_raw": quote_amount_raw,
            "wallet_balance_raw": quote_balance_raw,
            "sufficient": quote_has_balance,
        },
        "base_side_balance": {
            "base_symbol": base_symbol,
            "base_address": plan_row.get("base_address"),
            "required_raw": base_amount_raw,
            "wallet_balance_raw": base_balance_raw,
            "sufficient": base_has_balance,
            "base_supply_mode": plan_row.get("base_supply_mode"),
        },
        "base_side_mintability": {
            "mintable_base": base_mintable,
            "mint_base_amount_raw": plan_row.get("mint_base_amount_raw"),
        },
        "integration": {
            "env_key": plan_row.get("integration_env_key"),
            "address": plan_row.get("integration_address"),
            "present": integration_present,
        },
        "verification_publication_status": verification_publication,
        "existing_pool_address": existing_pool or None,
        "ready_to_create": plan_row.get("ready_to_create"),
        "ready_to_create_with_bootstrap_price": plan_row.get("ready_to_create_with_bootstrap_price"),
        "ready_to_seed_live": plan_row.get("ready_to_seed"),
        "blockers": plan_row.get("blockers", []),
        "next_step": plan_row.get("next_step"),
    }
    rows.append(row)

    chain_entry = chain_summary[(chain_id, network)]
    chain_entry["missing_pair_count"] += 1
    chain_entry["rows_missing_quote_side_stable"] += int(missing["quote_side_stable"])
    chain_entry["rows_missing_base_side_balance"] += int(missing["base_side_balance"])
    chain_entry["rows_missing_base_side_mintability"] += int(missing["base_side_mintability"])
    chain_entry["rows_missing_integration"] += int(missing["integration"])
    chain_entry["rows_missing_verification_publication"] += int(missing["verification_publication_status"])

summary = {
    "remaining_missing_pairs": len(rows),
    "rows_missing_quote_side_stable": sum(int(row["missing"]["quote_side_stable"]) for row in rows),
    "rows_missing_base_side_balance": sum(int(row["missing"]["base_side_balance"]) for row in rows),
    "rows_missing_base_side_mintability": sum(int(row["missing"]["base_side_mintability"]) for row in rows),
    "rows_missing_integration": sum(int(row["missing"]["integration"]) for row in rows),
    "rows_missing_verification_publication": sum(int(row["missing"]["verification_publication_status"]) for row in rows),
    "top_missing_reasons": dict(
        sorted(
            Counter(reason for row in rows for reason in row["missing_reasons"]).items(),
            key=lambda item: (-item[1], item[0]),
        )
    ),
}

chains = []
for (chain_id, network), item in sorted(chain_summary.items()):
    chains.append({
        "chain_id": chain_id,
        "network": network,
        **item,
    })

result = {
    "generated_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
    "description": "Chain-by-chain GRU v2 Wave 1 funding and authority report for the remaining missing public pools.",
    "sources": [
        str(plan_path.relative_to(output_path.parents[2])),
        str(gap_report_path.relative_to(output_path.parents[2])),
        str(verify_matrix_path.relative_to(output_path.parents[2])) if verify_matrix_path.exists() else "reports/status/contract_verification_publish_matrix.json (missing)",
    ],
    "summary": summary,
    "chains": chains,
    "rows": sorted(rows, key=lambda item: (item["chain_id"], item["pair"])),
}

output_path.write_text(json.dumps(result, indent=2) + "\n")
print(json.dumps(summary, indent=2))
PY

echo "Wrote ${OUTPUT_PATH}"
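
Reviewer note: each row in the generated report carries its blockers twice, as the `missing` flag map and the flattened `missing_reasons` list. A small consumer sketch (assuming the report has already been written to the default OUTPUT_PATH above) that lists pairs blocked only on verification/publication:

# Sketch: list pairs whose only remaining blocker is verification/publication.
import json
from pathlib import Path

report = json.loads(Path(
    "reports/extraction/gru-v2-wave1-funding-authority-report-latest.json"
).read_text())
for row in report["rows"]:
    if row["missing_reasons"] == ["verification_publication_status"]:
        print(row["chain_id"], row["pair"], "->", row["next_step"])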

scripts/verify/build-gru-v2-wave1-public-gap-report.sh  (Executable file, 185 lines added)
@@ -0,0 +1,185 @@
#!/usr/bin/env bash
# Build a machine-readable GRU v2 public Wave 1 gap report.
# Captures missing first-tier pools and missing token suites by network.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
OUTPUT_PATH="${PROJECT_ROOT}/reports/extraction/gru-v2-wave1-public-gap-report-latest.json"

mkdir -p "$(dirname "$OUTPUT_PATH")"

python3 - <<'PY' "$PROJECT_ROOT" "$OUTPUT_PATH"
import json
from collections import Counter
from datetime import datetime, timezone
from pathlib import Path
import sys

project_root = Path(sys.argv[1])
output_path = Path(sys.argv[2])

deployment_status = json.loads((project_root / "cross-chain-pmm-lps/config/deployment-status.json").read_text())
queue_json = json.loads(
    __import__("subprocess").check_output(
        ["bash", "scripts/verify/check-gru-v2-deployment-queue.sh", "--json"],
        cwd=project_root,
        text=True,
    )
)

loaded_full_mesh_tokens = {
    "cWUSDT",
    "cWUSDC",
    "cWEURC",
    "cWEURT",
    "cWGBPC",
    "cWGBPT",
    "cWAUDC",
    "cWJPYC",
    "cWCHFC",
    "cWCADC",
    "cWXAUC",
    "cWXAUT",
}

gas_native_required = {
    1: ["cWETH"],
    10: ["cWETHL2"],
    25: ["cWCRO"],
    56: ["cWBNB"],
    100: ["cWXDAI"],
    137: ["cWPOL"],
    1111: ["cWWEMIX"],
    8453: ["cWETHL2"],
    42161: ["cWETHL2"],
    42220: ["cWCELO"],
    43114: ["cWAVAX"],
}

network_names = {
    1: "Ethereum Mainnet",
    10: "Optimism",
    25: "Cronos",
    56: "BSC",
    100: "Gnosis",
    137: "Polygon",
    1111: "Wemix",
    8453: "Base",
    42161: "Arbitrum One",
    42220: "Celo",
    43114: "Avalanche C-Chain",
}


def normalize_pair(pair: str) -> str:
    return " / ".join(part.strip() for part in pair.split("/"))


chains = []
missing_pool_rows = []
missing_tokens_rows = []
live_pool_rows = []

for row in queue_json["chainQueue"]:
    chain_id = row["chainId"]
    network = row["name"]
    planned = set(row["plannedWave1Pairs"])
    recorded = set(row["recordedWave1Pairs"])
    missing = sorted(planned - recorded)
    live = sorted(recorded)

    chain_status = deployment_status["chains"].get(str(chain_id), {})
    actual_tokens = set((chain_status.get("cwTokens") or {}).keys())
    missing_tokens = sorted(token for token in loaded_full_mesh_tokens if token not in actual_tokens)
    missing_gas = [token for token in gas_native_required.get(chain_id, []) if token not in set((chain_status.get("gasMirrors") or {}).keys())]

    chain_entry = {
        "chain_id": chain_id,
        "network": network,
        "hub_stable": row["hubStable"],
        "bridge_available": row["bridgeAvailable"],
        "cw_token_count": row["cwTokenCount"],
        "planned_wave1_pair_count": len(planned),
        "recorded_live_pair_count": len(recorded),
        "missing_wave1_pair_count": len(missing),
        "live_wave1_pairs": live,
        "missing_wave1_pairs": missing,
        "missing_wrapped_tokens": missing_tokens,
        "missing_gas_native_tokens": missing_gas,
        "next_step": row["nextStep"],
    }
    chains.append(chain_entry)

    for pair in missing:
        missing_pool_rows.append(
            {
                "chain_id": chain_id,
                "network": network,
                "pair": pair,
                "hub_stable": row["hubStable"],
                "next_step": row["nextStep"],
            }
        )
    for pair in live:
        live_pool_rows.append(
            {
                "chain_id": chain_id,
                "network": network,
                "pair": pair,
                "hub_stable": row["hubStable"],
            }
        )
    for token in missing_tokens:
        missing_tokens_rows.append(
            {
                "chain_id": chain_id,
                "network": network,
                "token": token,
                "token_type": "wrapped_wave1",
            }
        )
    for token in missing_gas:
        missing_tokens_rows.append(
            {
                "chain_id": chain_id,
                "network": network,
                "token": token,
                "token_type": "gas_native_public_mirror",
            }
        )

summary = {
    "desired_public_evm_targets": queue_json["summary"]["desiredPublicEvmTargets"],
    "chains_with_loaded_cw_suites": queue_json["summary"]["chainsWithLoadedCwSuites"],
    "chains_missing_cw_suites": queue_json["summary"]["chainsMissingCwSuites"],
    "first_tier_wave1_pools_planned": queue_json["summary"]["firstTierWave1PoolsPlanned"],
    "first_tier_wave1_pools_recorded_live": queue_json["summary"]["firstTierWave1PoolsRecordedLive"],
    "first_tier_wave1_pools_missing": queue_json["summary"]["firstTierWave1PoolsPlanned"] - queue_json["summary"]["firstTierWave1PoolsRecordedLive"],
    "wave1_transport_pending_assets": queue_json["summary"]["wave1TransportPending"],
    "missing_token_rows": len(missing_tokens_rows),
    "networks_with_missing_tokens": len({row["network"] for row in missing_tokens_rows}),
}

result = {
    "generated_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
    "description": "Canonical machine-readable GRU v2 public Wave 1 rollout gap report.",
    "sources": [
        "scripts/verify/check-gru-v2-deployment-queue.sh --json",
        "cross-chain-pmm-lps/config/deployment-status.json",
        "config/gru-v2-d3mm-network-expansion-plan.json",
        "cross-chain-pmm-lps/config/pool-matrix.json",
    ],
    "summary": summary,
    "chains": sorted(chains, key=lambda item: item["chain_id"]),
    "missing_first_tier_wave1_pools": sorted(missing_pool_rows, key=lambda item: (item["chain_id"], item["pair"])),
    "live_first_tier_wave1_pools": sorted(live_pool_rows, key=lambda item: (item["chain_id"], item["pair"])),
    "missing_tokens": sorted(missing_tokens_rows, key=lambda item: (item["chain_id"], item["token_type"], item["token"])),
}

output_path.write_text(json.dumps(result, indent=2) + "\n")
print(json.dumps(summary, indent=2))
PY

echo "Wrote ${OUTPUT_PATH}"
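
Reviewer note: the gap report's `missing_tokens` rows carry a `token_type` discriminator (`wrapped_wave1` vs `gas_native_public_mirror`), which makes per-network rollups straightforward. A short consumer sketch, assuming the report exists at its default path:

# Sketch: count missing Wave 1 wrapped tokens per network from the gap report.
import json
from collections import Counter
from pathlib import Path

gap = json.loads(Path(
    "reports/extraction/gru-v2-wave1-public-gap-report-latest.json"
).read_text())
wrapped = Counter(
    row["network"]
    for row in gap["missing_tokens"]
    if row["token_type"] == "wrapped_wave1"
)
for network, count in wrapped.most_common():
    print(f"{network}: {count} missing cW* tokens")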

scripts/verify/check-gitea-branch-workflow-parity.sh  (deleted)
@@ -1,56 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"

SOURCE_TARGET_PAIRS=(
  ".gitea/workflow-sources/deploy-to-phoenix.yml:.gitea/workflows/deploy-to-phoenix.yml"
  ".gitea/workflow-sources/validate-on-pr.yml:.gitea/workflows/validate-on-pr.yml"
)

REMOTE="${GITEA_WORKFLOW_REMOTE:-origin}"
if git remote | grep -qx gitea; then
  REMOTE="${GITEA_WORKFLOW_REMOTE:-gitea}"
fi

missing_ref=false
for ref in "$REMOTE/main" "$REMOTE/master"; do
  if ! git rev-parse --verify "$ref" >/dev/null 2>&1; then
    missing_ref=true
  fi
done

if [[ "$missing_ref" == true ]]; then
  echo "[i] Skipping main/master workflow parity check ($REMOTE/main or $REMOTE/master not available)"
  exit 0
fi

for pair in "${SOURCE_TARGET_PAIRS[@]}"; do
  source="${pair%%:*}"
  target="${pair##*:}"

  main_blob="$(git show "$REMOTE/main:$source" 2>/dev/null || true)"
  master_blob="$(git show "$REMOTE/master:$source" 2>/dev/null || true)"

  if [[ -z "$main_blob" ]]; then
    main_blob="$(git show "$REMOTE/main:$target" 2>/dev/null || true)"
  fi
  if [[ -z "$master_blob" ]]; then
    master_blob="$(git show "$REMOTE/master:$target" 2>/dev/null || true)"
  fi

  if [[ -z "$main_blob" || -z "$master_blob" ]]; then
    echo "[✗] Missing $source/$target on $REMOTE/main or $REMOTE/master" >&2
    exit 1
  fi

  if [[ "$main_blob" != "$master_blob" ]]; then
    echo "[✗] Branch workflow drift: $source differs between $REMOTE/main and $REMOTE/master" >&2
    echo " Keep both deploy branches in lockstep for workflow-source files." >&2
    exit 1
  fi

  echo "[✓] Branch parity OK for $source"
done

scripts/verify/check-gitea-workflows.sh  (deleted)
@@ -1,32 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"

check_one() {
  local source_rel="$1"
  local target_rel="$2"

  if [[ ! -f "$source_rel" ]]; then
    echo "[✗] Missing workflow source: $source_rel" >&2
    return 1
  fi

  if [[ ! -f "$target_rel" ]]; then
    echo "[✗] Missing generated workflow: $target_rel" >&2
    return 1
  fi

  if ! diff -u "$source_rel" "$target_rel" >/dev/null; then
    echo "[✗] Workflow drift detected: $target_rel does not match $source_rel" >&2
    echo " Run: bash scripts/verify/sync-gitea-workflows.sh" >&2
    return 1
  fi

  echo "[✓] $target_rel matches $source_rel"
}

check_one ".gitea/workflow-sources/deploy-to-phoenix.yml" ".gitea/workflows/deploy-to-phoenix.yml"
check_one ".gitea/workflow-sources/validate-on-pr.yml" ".gitea/workflows/validate-on-pr.yml"

scripts/verify/check-gru-v2-core-protocol-blockers.sh  (Executable file, 39 lines added)
@@ -0,0 +1,39 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

failures=0

check_file() {
  local label="$1" path="$2"
  if [[ -f "$path" ]]; then
    echo "OK $label -> $path"
  else
    echo "MISS $label -> $path"
    failures=1
  fi
}

echo "=== GRU v2 core protocol blocker check ==="

check_file "138 DODO PMM mesh sync" "$PROJECT_ROOT/scripts/create-pmm-full-mesh-chain138.sh"
check_file "138 pilot protocol venue deployer" "$PROJECT_ROOT/scripts/deployment/deploy-chain138-pilot-protocol-venues.sh"
check_file "138 Aave execution stack deployer" "$PROJECT_ROOT/scripts/deployment/deploy-chain138-aave-v3-execution-stack.sh"
check_file "138 Aave quote-push receiver deployer" "$PROJECT_ROOT/scripts/deployment/deploy-chain138-aave-quote-push-receiver.sh"
check_file "138 remaining protocol surface" "$PROJECT_ROOT/config/chain138-remaining-protocol-surface.json"
check_file "138 remaining protocol env verifier" "$PROJECT_ROOT/scripts/verify/check-chain138-remaining-protocol-env.sh"
check_file "651940 cA* token deployer" "$PROJECT_ROOT/scripts/deployment/deploy-allmainnet-ca-tokens.sh"
check_file "651940 PMM desired-state sync" "$PROJECT_ROOT/scripts/deployment/sync-allmainnet-pmm-pools-from-json.sh"
check_file "651940 cA* token catalog" "$PROJECT_ROOT/config/allmainnet-ca-token-catalog.json"
check_file "651940 PMM desired-state config" "$PROJECT_ROOT/config/allmainnet-pmm-pools.json"
check_file "GRU deployment implementation status doc" "$PROJECT_ROOT/docs/04-configuration/GRU_V2_FULL_DEPLOYMENT_IMPLEMENTATION_STATUS.md"

echo "--- remaining external blockers ---"
echo "INFO Chain 138 Aave still requires real CHAIN_138_AAVE_POOL / provider / data-provider addresses before the new deploy wrappers can be applied."
echo "INFO Chain 138 GMX and dYdX still require canonical live addresses and native protocol stacks."
echo "INFO Full venue completion for 651940 across Uniswap v2/v3, SushiSwap, Curve, Balancer, 1Inch, Aave, GMX, and dYdX still depends on live venue addresses, chain support, and liquidity."
echo "INFO 651940 PMM sync also requires live DODO integration/provider addresses and final deployed cA* token addresses."

exit "$failures"

scripts/verify/check-gru-v2-full-deployment-implementation.py  (Executable file, 52 lines added)
@@ -0,0 +1,52 @@
#!/usr/bin/env python3
"""Verify which parts of the GRU v2 full deployment plan are actually implemented in-repo."""

from __future__ import annotations

import json
from pathlib import Path
import sys


ROOT = Path(__file__).resolve().parents[2]


def exists(rel: str) -> bool:
    return (ROOT / rel).exists()


def main() -> int:
    checks = {
        "chain138_mesh_sync": exists("scripts/create-pmm-full-mesh-chain138.sh"),
        "chain138_next_steps_runner": exists("scripts/deployment/run-all-next-steps-chain138.sh"),
        "chain138_readiness_check": exists("scripts/verify/check-gru-v2-chain138-readiness.sh"),
        "gru_master_matrix": exists("docs/04-configuration/GRU_V2_FULL_MESH_MASTER_MATRIX.md"),
        "gru_live_status_report": exists("docs/04-configuration/GRU_V2_FULL_MESH_LIVE_STATUS_REPORT.md"),
        "gru_protocol_matrix": exists("docs/04-configuration/GRU_V2_PROTOCOL_COMPLETION_MATRIX.md"),
        "gru_pool_tracker": exists("config/gru-v2-full-mesh-pool-tracker.json"),
        "all_mainnet_token_inventory": exists("docs/11-references/ALL_MAINNET_TOKEN_ADDRESSES.md"),
        "all_mainnet_full_cA_deployer": exists("scripts/deployment/deploy-allmainnet-ca-tokens.sh"),
        "all_mainnet_full_mesh_deployer": exists("scripts/deployment/sync-allmainnet-pmm-pools-from-json.sh"),
        "chain138_uniswap_curve_balancer_full_deployer": exists("scripts/deployment/deploy-chain138-pilot-protocol-venues.sh"),
        "chain138_aave_execution_deployer": exists("scripts/deployment/deploy-chain138-aave-v3-execution-stack.sh"),
        "chain138_aave_receiver_deployer": exists("scripts/deployment/deploy-chain138-aave-quote-push-receiver.sh"),
        "chain138_remaining_protocol_inventory": exists("config/chain138-remaining-protocol-surface.json"),
        "chain138_remaining_protocol_checker": exists("scripts/verify/check-chain138-remaining-protocol-env.sh"),
        "all_mainnet_uniswap_curve_balancer_full_deployer": False,
    }

    missing_repo_backed = [
        "all_mainnet_uniswap_curve_balancer_full_deployer",
    ]

    report = {
        "implemented": {k: v for k, v in checks.items() if v},
        "missing_or_external_blocked": {k: checks[k] for k in missing_repo_backed if not checks[k]},
    }

    print(json.dumps(report, indent=2, sort_keys=True))
    return 1 if any(not checks[k] for k in missing_repo_backed) else 0


if __name__ == "__main__":
    sys.exit(main())
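
Reviewer note: the checker writes its JSON report to stdout and signals the still-missing repo-backed deployer through its exit code, so a CI wrapper only needs to parse stdout and propagate the return code. A hedged sketch of such a wrapper:

# Sketch: run the implementation checker from a CI step and surface its report.
import json
import subprocess

proc = subprocess.run(
    ["python3", "scripts/verify/check-gru-v2-full-deployment-implementation.py"],
    capture_output=True,
    text=True,
)
report = json.loads(proc.stdout)
print(f"implemented: {len(report['implemented'])}")
print(f"missing or externally blocked: {sorted(report['missing_or_external_blocked'])}")
# non-zero exit means at least one repo-backed deployer is still missing
raise SystemExit(proc.returncode)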

scripts/verify/check-pnpm-workspace-lockfile.sh  (deleted)
@@ -1,50 +0,0 @@
#!/usr/bin/env bash
# Every path listed under "packages:" in pnpm-workspace.yaml must have a matching
# importer entry in pnpm-lock.yaml. If one is missing, pnpm can fail in confusing
# ways (e.g. pnpm outdated -r: Cannot read ... 'optionalDependencies').
# Usage: bash scripts/verify/check-pnpm-workspace-lockfile.sh
# Exit: 0 if check passes or pnpm is not used; 1 on mismatch.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
WS="${PROJECT_ROOT}/pnpm-workspace.yaml"
LOCK="${PROJECT_ROOT}/pnpm-lock.yaml"

if [[ ! -f "$WS" ]] || [[ ! -f "$LOCK" ]]; then
  echo " (skip: pnpm-workspace.yaml or pnpm-lock.yaml not present at repo root)"
  exit 0
fi

# Paths under the top-level `packages:` block only (stops at next top-level key)
mapfile -t _paths < <(awk '
  /^packages:/ { p=1; next }
  p && /^[a-zA-Z]/ && $0 !~ /^packages/ { exit }
  p && /^[[:space:]]*-[[:space:]]/ {
    sub(/^[[:space:]]*-[[:space:]]+/, "")
    sub(/[[:space:]]*#.*/, "")
    gsub(/[[:space:]]+$/, "")
    if (length) print
  }
' "$WS")

missing=()
for relp in "${_paths[@]}"; do
  if [[ -z "$relp" ]]; then
    continue
  fi
  if ! grep -qFx " ${relp}:" "$LOCK"; then
    missing+=("$relp")
  fi
done

if [[ ${#missing[@]} -gt 0 ]]; then
  echo "✗ pnpm lockfile is missing importer(s) for these workspace path(s):"
  printf ' %q\n' "${missing[@]}"
  echo " Run: pnpm install (at repo root) to refresh pnpm-lock.yaml"
  exit 1
fi

echo " pnpm workspace / lockfile importers aligned (${#_paths[@]} path(s))."
exit 0

scripts/verify/export-gru-v2-full-mesh-tracker-csv.py  (Normal file, 30 lines added)
@@ -0,0 +1,30 @@
#!/usr/bin/env python3
import csv
import json
import pathlib
import sys


ROOT = pathlib.Path("/home/intlc/projects/proxmox")
TRACKER = ROOT / "config/gru-v2-full-mesh-pool-tracker.json"


def main() -> None:
    data = json.loads(TRACKER.read_text())
    out = csv.writer(sys.stdout)
    out.writerow(["namespace", "chain", "pair", "priority", "status"])

    for row in data["chain138"]["entries"]:
        out.writerow(["c* V2", "138", row["pair"], row.get("priority", ""), data["defaultFields"]["status"]])

    for row in data["allMainnet651940"]["entries"]:
        out.writerow(["cA*", "651940", row["pair"], row.get("priority", ""), data["defaultFields"]["status"]])

    for chain, bucket in data["publicMesh"].items():
        status = bucket.get("statusOverride", data["defaultFields"]["status"])
        for pair in bucket["entries"]:
            out.writerow(["cW*", chain, pair, "", status])


if __name__ == "__main__":
    main()
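
Reviewer note: unlike the other verifiers in this diff, the exporter pins ROOT to an absolute checkout path (/home/intlc/projects/proxmox), so it only runs from that one machine. Purely as a hedged suggestion (not part of this commit), the root could be derived from the script's own location, the same way the full-mesh artifacts validator does:

# Hypothetical portability tweak, not part of this commit:
# scripts/verify/<this file> -> two parents up is the repo root.
import pathlib

ROOT = pathlib.Path(__file__).resolve().parents[2]
TRACKER = ROOT / "config/gru-v2-full-mesh-pool-tracker.json"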

scripts/verify/run-all-validation.sh  (modified)
@@ -3,7 +3,6 @@
# Use for CI or pre-deploy: dependencies, config files, optional genesis.
# Usage: bash scripts/verify/run-all-validation.sh [--skip-genesis]
#   --skip-genesis: do not run validate-genesis.sh (default: run if smom-dbis-138 present).
# Steps: dependencies, config files, cW* mesh matrix (if pair-discovery JSON exists), genesis.

set -euo pipefail

@@ -25,64 +24,15 @@ bash "$SCRIPT_DIR/check-dependencies.sh" || log_err "check-dependencies failed"
log_ok "Dependencies OK"
echo ""

echo "1b. pnpm workspace vs lockfile..."
if [[ -f "$PROJECT_ROOT/pnpm-workspace.yaml" ]]; then
  bash "$SCRIPT_DIR/check-pnpm-workspace-lockfile.sh" || log_err "pnpm lockfile / workspace drift"
  log_ok "pnpm lockfile aligned with workspace"
else
  echo "  (no pnpm-workspace.yaml at root — skip)"
fi
echo ""

echo "1c. Gitea workflow source sync..."
bash "$SCRIPT_DIR/check-gitea-workflows.sh" || log_err "Gitea workflow source drift"
log_ok "Gitea workflows match source-of-truth files"
echo ""

echo "1d. main/master workflow parity..."
bash "$SCRIPT_DIR/check-gitea-branch-workflow-parity.sh" || log_err "main/master workflow parity drift"
log_ok "main/master workflow parity OK"
echo ""

echo "2. Config files..."
bash "$SCRIPT_DIR/../validation/validate-config-files.sh" || log_err "validate-config-files failed"
log_ok "Config validation OK"
echo ""

echo "3. cW* mesh matrix (deployment-status + Uni V2 pair-discovery)..."
DISCOVERY_JSON="$PROJECT_ROOT/reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json"
if [[ -f "$DISCOVERY_JSON" ]]; then
  MATRIX_JSON="$PROJECT_ROOT/reports/status/cw-mesh-deployment-matrix-latest.json"
  bash "$SCRIPT_DIR/build-cw-mesh-deployment-matrix.sh" --no-markdown --json-out "$MATRIX_JSON" || log_err "cw mesh matrix merge failed"
  log_ok "cW mesh matrix OK (also wrote $MATRIX_JSON)"
else
  echo "  ($DISCOVERY_JSON missing — run: bash scripts/verify/build-promod-uniswap-v2-live-pair-discovery.sh)"
fi
echo ""

echo "3b. deployment-status graph (cross-chain-pmm-lps)..."
PMM_VALIDATE="$PROJECT_ROOT/cross-chain-pmm-lps/scripts/validate-deployment-status.cjs"
if [[ -f "$PMM_VALIDATE" ]] && command -v node &>/dev/null; then
  node "$PMM_VALIDATE" || log_err "validate-deployment-status.cjs failed"
  log_ok "deployment-status.json rules OK"
else
  echo "  (skip: node or $PMM_VALIDATE missing)"
fi
echo ""

echo "3c. External dependency blockers..."
EXT_CHECK="$SCRIPT_DIR/check-external-dependencies.sh"
if [[ -x "$EXT_CHECK" ]]; then
  bash "$EXT_CHECK" --advisory || true
else
  echo "  (skip: $EXT_CHECK missing)"
fi
echo ""

if [[ "$SKIP_GENESIS" == true ]]; then
  echo "4. Genesis — skipped (--skip-genesis)"
  echo "3. Genesis — skipped (--skip-genesis)"
else
  echo "4. Genesis (smom-dbis-138)..."
  echo "3. Genesis (smom-dbis-138)..."
  GENESIS_SCRIPT="$PROJECT_ROOT/smom-dbis-138/scripts/validation/validate-genesis.sh"
  if [[ -x "$GENESIS_SCRIPT" ]]; then
    bash "$GENESIS_SCRIPT" || log_err "validate-genesis failed"

scripts/verify/sync-gitea-workflows.sh  (deleted)
@@ -1,18 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"

sync_one() {
  local source_rel="$1"
  local target_rel="$2"

  mkdir -p "$(dirname "$target_rel")"
  cp "$source_rel" "$target_rel"
  echo "[✓] Synced $target_rel from $source_rel"
}

sync_one ".gitea/workflow-sources/deploy-to-phoenix.yml" ".gitea/workflows/deploy-to-phoenix.yml"
sync_one ".gitea/workflow-sources/validate-on-pr.yml" ".gitea/workflows/validate-on-pr.yml"

config/gru-v2-full-mesh-master-matrix.json  (new file)
@@ -0,0 +1,133 @@
{
  "statusDate": "2026-04-14",
  "namespaces": {
    "chain138": "c* V2",
    "allMainnet651940": "cA*",
    "publicConnectedNetworks": "cW*"
  },
  "executionPhases": [
    {
      "id": "P0",
      "namespace": "c* V2",
      "scope": "Chain 138 canonical hub and Wave 1"
    },
    {
      "id": "P1",
      "namespace": "c* V2",
      "scope": "Chain 138 cross-links and gas-native hubs"
    },
    {
      "id": "P2",
      "namespace": "cA*",
      "scope": "ALL Mainnet canonical hub and Wave 1"
    },
    {
      "id": "P3",
      "namespace": "cA*",
      "scope": "ALL Mainnet cross-links and gas-native hubs"
    },
    {
      "id": "P4",
      "namespace": "cW*",
      "scope": "Public cW stable hub, Wave 1, and gas-native mesh"
    },
    {
      "id": "P5",
      "namespace": "all",
      "scope": "Spot venue protocol completion"
    },
    {
      "id": "P6",
      "namespace": "all",
      "scope": "Aggregator, reserve, and market protocol completion"
    },
    {
      "id": "P7",
      "namespace": "all",
      "scope": "MEV completion"
    }
  ],
  "protocolsRequired": [
    "DODO",
    "Uniswap v3",
    "Uniswap v2",
    "SushiSwap",
    "Curve",
    "Balancer",
    "1Inch",
    "Aave",
    "GMX",
    "dYdX"
  ],
  "chain138CanonicalPools": [
    "cUSDT V2 / cUSDC V2",
    "cUSDT V2 / USDT",
    "cUSDC V2 / USDC",
    "cEURC V2 / cUSDC V2",
    "cEURT V2 / cUSDC V2",
    "cGBPC V2 / cUSDC V2",
    "cGBPT V2 / cUSDC V2",
    "cAUDC V2 / cUSDC V2",
    "cJPYC V2 / cUSDC V2",
    "cCHFC V2 / cUSDC V2",
    "cCADC V2 / cUSDC V2",
    "cXAUC V2 / cUSDC V2",
    "cXAUT V2 / cUSDC V2",
    "cEURC V2 / cEURT V2",
    "cGBPC V2 / cGBPT V2",
    "cXAUC V2 / cXAUT V2",
    "cETH / WETH",
    "cETH / cUSDC V2",
    "cETHL2 / cUSDC V2",
    "cBNB / cUSDC V2",
    "cPOL / cUSDC V2",
    "cAVAX / cUSDC V2",
    "cCRO / cUSDC V2",
    "cXDAI / cUSDC V2",
    "cCELO / cUSDC V2",
    "cWEMIX / cUSDC V2"
  ],
  "allMainnetCanonicalPools": [
    "cAUSDT / cAUSDC",
    "cAUSDT / AUSDT",
    "cAUSDC / USDC",
    "cAEURC / cAUSDC",
    "cAEURT / cAUSDC",
    "cAGBPC / cAUSDC",
    "cAGBPT / cAUSDC",
    "cAAUDC / cAUSDC",
    "cAJPYC / cAUSDC",
    "cACHFC / cAUSDC",
    "cACADC / cAUSDC",
    "cAXAUC / cAUSDC",
    "cAXAUT / cAUSDC",
    "cAEURC / cAEURT",
    "cAGBPC / cAGBPT",
    "cAXAUC / cAXAUT",
    "cAETH / WETH",
    "cAETH / cAUSDC",
    "cAWALL / WALL",
    "cAWALL / cAUSDC"
  ],
  "publicMeshTemplate": {
    "stableHub": [
      "cWUSDT / USDC",
      "cWUSDC / USDC",
      "cWUSDT / USDT",
      "cWUSDC / USDT",
      "cWUSDT / cWUSDC"
    ],
    "wave1VsUsdc": [
      "cWEURC / USDC",
      "cWEURT / USDC",
      "cWGBPC / USDC",
      "cWGBPT / USDC",
      "cWAUDC / USDC",
      "cWJPYC / USDC",
      "cWCHFC / USDC",
      "cWCADC / USDC",
      "cWXAUC / USDC",
      "cWXAUT / USDC"
    ]
  }
}

config/gru-v2-full-mesh-pool-tracker.json  (new file)
@@ -0,0 +1,169 @@
{
  "statusDate": "2026-04-15",
  "defaultFields": {
    "status": "todo",
    "deployed": false,
    "seeded": false,
    "validated": false,
    "live": false,
    "mevReady": false
  },
  "chain138": {
    "namespace": "c* V2",
    "entries": [
      { "pair": "cUSDT V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cUSDT V2 / USDT", "priority": "P0" },
      { "pair": "cUSDC V2 / USDC", "priority": "P0" },
      { "pair": "cEURC V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cEURT V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cGBPC V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cGBPT V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cAUDC V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cJPYC V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cCHFC V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cCADC V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cXAUC V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cXAUT V2 / cUSDC V2", "priority": "P0" },
      { "pair": "cEURC V2 / cEURT V2", "priority": "P1" },
      { "pair": "cGBPC V2 / cGBPT V2", "priority": "P1" },
      { "pair": "cXAUC V2 / cXAUT V2", "priority": "P1" },
      { "pair": "cETH / WETH", "priority": "P1" },
      { "pair": "cETH / cUSDC V2", "priority": "P1" },
      { "pair": "cETHL2 / cUSDC V2", "priority": "P2" },
      { "pair": "cBNB / cUSDC V2", "priority": "P2" },
      { "pair": "cPOL / cUSDC V2", "priority": "P2" },
      { "pair": "cAVAX / cUSDC V2", "priority": "P2" },
      { "pair": "cCRO / cUSDC V2", "priority": "P2" },
      { "pair": "cXDAI / cUSDC V2", "priority": "P2" },
      { "pair": "cCELO / cUSDC V2", "priority": "P2" },
      { "pair": "cWEMIX / cUSDC V2", "priority": "P2" }
    ]
  },
  "allMainnet651940": {
    "namespace": "cA*",
    "entries": [
      { "pair": "cAUSDT / cAUSDC", "priority": "P0" },
      { "pair": "cAUSDT / AUSDT", "priority": "P0" },
      { "pair": "cAUSDC / USDC", "priority": "P0" },
      { "pair": "cAEURC / cAUSDC", "priority": "P0" },
      { "pair": "cAEURT / cAUSDC", "priority": "P0" },
      { "pair": "cAGBPC / cAUSDC", "priority": "P0" },
      { "pair": "cAGBPT / cAUSDC", "priority": "P0" },
      { "pair": "cAAUDC / cAUSDC", "priority": "P0" },
      { "pair": "cAJPYC / cAUSDC", "priority": "P0" },
      { "pair": "cACHFC / cAUSDC", "priority": "P0" },
      { "pair": "cACADC / cAUSDC", "priority": "P0" },
      { "pair": "cAXAUC / cAUSDC", "priority": "P0" },
      { "pair": "cAXAUT / cAUSDC", "priority": "P0" },
      { "pair": "cAEURC / cAEURT", "priority": "P1" },
      { "pair": "cAGBPC / cAGBPT", "priority": "P1" },
      { "pair": "cAXAUC / cAXAUT", "priority": "P1" },
      { "pair": "cAETH / WETH", "priority": "P1" },
      { "pair": "cAETH / cAUSDC", "priority": "P1" },
      { "pair": "cAWALL / WALL", "priority": "P1" },
      { "pair": "cAWALL / cAUSDC", "priority": "P1" }
    ]
  },
  "publicMesh": {
    "1": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWETH / WETH","cWETH / USDC"
      ]
    },
    "10": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWETHL2 / WETH","cWETHL2 / USDC"
      ]
    },
    "25": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWCRO / WCRO","cWCRO / USDT"
      ]
    },
    "56": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWBNB / WBNB","cWBNB / USDT"
      ]
    },
    "100": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWXDAI / WXDAI","cWXDAI / USDC"
      ]
    },
    "137": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWPOL / WPOL","cWPOL / USDC"
      ]
    },
    "8453": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWETHL2 / WETH","cWETHL2 / USDC"
      ]
    },
    "42161": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWETHL2 / WETH","cWETHL2 / USDC"
      ]
    },
    "42220": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWCELO / WCELO","cWCELO / USDC"
      ]
    },
    "43114": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWAVAX / WAVAX","cWAVAX / USDC"
      ]
    },
    "1111": {
      "namespace": "cW*",
      "entries": [
        "cWUSDT / USDC","cWUSDC / USDC","cWUSDT / USDT","cWUSDC / USDT","cWUSDT / cWUSDC",
        "cWEURC / USDC","cWEURT / USDC","cWGBPC / USDC","cWGBPT / USDC","cWAUDC / USDC",
        "cWJPYC / USDC","cWCHFC / USDC","cWCADC / USDC","cWXAUC / USDC","cWXAUT / USDC",
        "cWWEMIX / WWEMIX","cWWEMIX / USDC"
      ],
      "statusOverride": "planned"
    }
  }
}
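
Reviewer note: the tracker mixes two entry shapes. chain138 and allMainnet651940 hold objects with pair/priority, while publicMesh buckets hold bare pair strings plus an optional statusOverride. A small sketch that flattens both shapes into one list, which is essentially what the CSV exporter above relies on:

# Sketch: flatten both tracker entry shapes into (namespace, chain, pair) tuples.
import json
from pathlib import Path

tracker = json.loads(Path("config/gru-v2-full-mesh-pool-tracker.json").read_text())
flat = []
for bucket_key, chain in (("chain138", "138"), ("allMainnet651940", "651940")):
    bucket = tracker[bucket_key]
    for entry in bucket["entries"]:
        flat.append((bucket["namespace"], chain, entry["pair"]))
for chain, bucket in tracker["publicMesh"].items():
    for pair in bucket["entries"]:
        flat.append((bucket["namespace"], chain, pair))
print(len(flat), "tracked pairs")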

config/gru-v2-full-mesh-pool-tracker.schema.json  (new file)
@@ -0,0 +1,82 @@
{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "$id": "https://d-bis.org/schemas/gru-v2-full-mesh-pool-tracker.json",
  "title": "GRU v2 Full Mesh Pool Tracker",
  "type": "object",
  "required": [
    "statusDate",
    "defaultFields",
    "chain138",
    "allMainnet651940",
    "publicMesh"
  ],
  "properties": {
    "statusDate": {
      "type": "string",
      "pattern": "^\\d{4}-\\d{2}-\\d{2}$"
    },
    "defaultFields": {
      "type": "object",
      "required": ["status", "deployed", "seeded", "validated", "live", "mevReady"],
      "properties": {
        "status": {
          "type": "string",
          "enum": ["todo", "in_progress", "blocked", "done"]
        },
        "deployed": { "type": "boolean" },
        "seeded": { "type": "boolean" },
        "validated": { "type": "boolean" },
        "live": { "type": "boolean" },
        "mevReady": { "type": "boolean" }
      },
      "additionalProperties": false
    },
    "chain138": { "$ref": "#/$defs/namedBucket" },
    "allMainnet651940": { "$ref": "#/$defs/namedBucket" },
    "publicMesh": {
      "type": "object",
      "minProperties": 1,
      "additionalProperties": { "$ref": "#/$defs/meshBucket" }
    }
  },
  "$defs": {
    "pairEntry": {
      "type": "object",
      "required": ["pair"],
      "properties": {
        "pair": { "type": "string", "minLength": 3 },
        "priority": { "type": "string", "minLength": 2 }
      },
      "additionalProperties": false
    },
    "namedBucket": {
      "type": "object",
      "required": ["namespace", "entries"],
      "properties": {
        "namespace": { "type": "string", "minLength": 2 },
        "entries": {
          "type": "array",
          "items": { "$ref": "#/$defs/pairEntry" }
        }
      },
      "additionalProperties": false
    },
    "meshBucket": {
      "type": "object",
      "required": ["namespace", "entries"],
      "properties": {
        "namespace": { "type": "string", "minLength": 2 },
        "entries": {
          "type": "array",
          "items": { "type": "string", "minLength": 3 }
        },
        "statusOverride": {
          "type": "string",
          "enum": ["planned", "todo", "in_progress", "blocked", "done"]
        }
      },
      "additionalProperties": false
    }
  },
  "additionalProperties": false
}