chore(repo): sync operator workspace (config, scripts, docs, multi-chain)
Add optional Cosmos/Engine-X/act-runner templates, CWUSDC/EI-matrix tooling, non-EVM route planner in multi-chain-execution (tests passing), token list and extraction updates, and documentation (MetaMask matrix, GRU/CWUSDC packets). Ignore institutional evidence tarballs/sha256 under reports/status. Validated with: bash scripts/verify/run-all-validation.sh --skip-genesis Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
246
scripts/lib/ei_matrix_multicall3_cwusdc_batch.py
Normal file
246
scripts/lib/ei_matrix_multicall3_cwusdc_batch.py
Normal file
@@ -0,0 +1,246 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Batch mainnet cWUSDC to EI matrix wallets via canonical Multicall3 aggregate3.
|
||||
|
||||
Each inner call is transferFrom(deployer, recipient, amount) on the token, so
|
||||
msg.sender is Multicall3. Requires a prior approve(deployer -> Multicall3) for
|
||||
at least the sum of amounts in this run (one tx before batches).
|
||||
|
||||
Default Multicall3 (Ethereum): 0xcA11bde05977b3631167028862bE2a173976CA11
|
||||
|
||||
Examples:
|
||||
python3 scripts/lib/ei_matrix_multicall3_cwusdc_batch.py --dry-run \\
|
||||
--tsv reports/status/ei-matrix-cwusdc-topup-amounts.tsv
|
||||
|
||||
python3 scripts/lib/ei_matrix_multicall3_cwusdc_batch.py --execute \\
|
||||
--tsv reports/status/ei-matrix-cwusdc-topup-amounts.tsv
|
||||
|
||||
Env: PRIVATE_KEY (or DEPLOYER_ADDRESS for dry-run calldata only), ETHEREUM_MAINNET_RPC,
|
||||
CWUSDC_MAINNET (optional), MULTICALL3_MAINNET (optional), EI_MATRIX_MC_CHUNK (default 200).
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
MULTICALL3_MAINNET = "0xcA11bde05977b3631167028862bE2a173976CA11"
|
||||
DEFAULT_CWUSDC = "0x2de5F116bFcE3d0f922d9C8351e0c5Fc24b9284a"
|
||||
|
||||
|
||||
def _sh(cmd: list[str]) -> str:
    """Run *cmd* and return its stripped stdout.

    Raises RuntimeError with the command line and stderr (falling back to
    stdout) when the process exits non-zero.
    """
    proc = subprocess.run(cmd, capture_output=True, text=True, check=False)
    if proc.returncode == 0:
        return (proc.stdout or "").strip()
    detail = (proc.stderr or proc.stdout).strip()
    raise RuntimeError(f"command failed: {' '.join(cmd)}\n{detail}")
|
||||
|
||||
|
||||
def _deployer(pk: str | None) -> str:
    """Resolve the deployer address used as transferFrom's `from` argument.

    With a private key, derive the address via `cast wallet address`;
    otherwise fall back to the DEPLOYER_ADDRESS / DEPLOYER env vars.
    Exits via SystemExit when neither source yields an address.
    """
    if pk:
        return _sh(["cast", "wallet", "address", "--private-key", pk])
    fallback = (os.environ.get("DEPLOYER_ADDRESS") or os.environ.get("DEPLOYER") or "").strip()
    if not fallback:
        raise SystemExit("Set PRIVATE_KEY or DEPLOYER_ADDRESS for transferFrom(from=...)")
    return fallback
|
||||
|
||||
|
||||
def _cast_calldata_transfer_from(from_addr: str, to_addr: str, amount: int) -> str:
    """ABI-encode transferFrom(from, to, amount) via `cast calldata`, 0x-prefixed."""
    encoded = _sh(
        ["cast", "calldata", "transferFrom(address,address,uint256)", from_addr, to_addr, str(amount)]
    )
    if encoded.startswith("0x"):
        return encoded
    return "0x" + encoded
|
||||
|
||||
|
||||
def _cast_calldata_aggregate3(calls_tuple_str: str) -> str:
    """ABI-encode Multicall3 aggregate3((address,bool,bytes)[]) calldata, 0x-prefixed.

    *calls_tuple_str* is the cast-style tuple-array literal, e.g.
    "[(0xToken,false,0xdead...),...]".
    """
    encoded = _sh(["cast", "calldata", "aggregate3((address,bool,bytes)[])", calls_tuple_str])
    if encoded.startswith("0x"):
        return encoded
    return "0x" + encoded
|
||||
|
||||
|
||||
def _estimate_gas(from_addr: str, multicall: str, data: str, rpc_url: str) -> int:
    """eth_estimateGas for sending *data* to *multicall* from *from_addr*.

    Returns the estimate as an int (the RPC replies with a hex quantity).
    """
    call_obj = {"from": from_addr, "to": multicall, "data": data}
    raw = _sh(["cast", "rpc", "eth_estimateGas", json.dumps(call_obj), "--rpc-url", rpc_url])
    return int(raw, 16)
|
||||
|
||||
|
||||
def _allowance(token: str, owner: str, spender: str, rpc_url: str) -> int:
    """Read ERC-20 allowance(owner, spender) on *token* via `cast call`."""
    raw = _sh(
        ["cast", "call", token, "allowance(address,address)(uint256)", owner, spender, "--rpc-url", rpc_url]
    )
    # cast may append an annotation after the value (e.g. "1000 [1e3]");
    # only the first whitespace-separated token is the number. Base 0 accepts
    # either decimal or 0x-prefixed output.
    return int(raw.split()[0], 0)
|
||||
|
||||
|
||||
def _send_cast_send(to: str, sig: str, args: list[str], rpc_url: str, pk: str, gas_limit: str | None) -> None:
    """Broadcast `cast send <to> <sig> <args...>` and exit the process on failure.

    Interactive stdout/stderr are inherited so the operator sees cast's own
    progress output. The private key never appears in the log line.
    """
    cmd = ["cast", "send", to, sig, *args, "--rpc-url", rpc_url, "--private-key", pk]
    if gas_limit:
        cmd.extend(["--gas-limit", gas_limit])
    # Log the command only up to the --private-key flag. The original sliced a
    # fixed prefix (cmd[:8]), which would include the key if `args` were ever
    # short enough; slicing at the flag redacts it for any argument count.
    redacted = cmd[: cmd.index("--private-key")]
    print("→", " ".join(redacted), "…", file=sys.stderr)
    r = subprocess.run(cmd, env={**os.environ})
    if r.returncode != 0:
        # Abort the whole run: later batches must not execute after a failed approve/send.
        sys.exit(r.returncode)
|
||||
|
||||
|
||||
def _send_raw_calldata(to: str, data: str, rpc_url: str, pk: str, gas_limit: str) -> None:
    """`cast send` pre-encoded calldata to *to*; exit the process on failure."""
    cmd = [
        "cast",
        "send",
        to,
        data,
        "--rpc-url",
        rpc_url,
        "--private-key",
        pk,
        "--gas-limit",
        gas_limit,
    ]
    # Only a truncated target address and the gas limit are logged; neither the
    # calldata blob nor the private key is echoed.
    print("→ cast send", to[:10] + "…", "--gas-limit", gas_limit, file=sys.stderr)
    result = subprocess.run(cmd, env={**os.environ})
    if result.returncode != 0:
        sys.exit(result.returncode)
|
||||
|
||||
|
||||
def main() -> int:
    """Batch cWUSDC transferFrom calls to EI-matrix wallets via Multicall3.

    Reads (linearIndex, amountRaw) rows from a TSV, resolves addresses through
    the wallet grid JSON, chunks them into aggregate3 batches, and either
    prints operator commands (--dry-run) or broadcasts them (--execute).
    Returns 0 on success, 1 on allowance failure, 2 on usage errors.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--tsv", required=True, help="linearIndex TAB amountRaw")
    ap.add_argument("--grid", default="config/pmm-soak-wallet-grid.json")
    ap.add_argument("--chunk-size", type=int, default=int(os.environ.get("EI_MATRIX_MC_CHUNK", "200")))
    ap.add_argument("--multicall", default=os.environ.get("MULTICALL3_MAINNET", MULTICALL3_MAINNET))
    ap.add_argument("--token", default=os.environ.get("CWUSDC_MAINNET", DEFAULT_CWUSDC))
    ap.add_argument("--rpc-url", default=os.environ.get("ETHEREUM_MAINNET_RPC") or os.environ.get("RPC_URL_1") or "")
    ap.add_argument("--dry-run", action="store_true")
    ap.add_argument("--execute", action="store_true")
    ap.add_argument("--gas-headroom-bps", type=int, default=13000)
    ap.add_argument("--min-gas-per-batch", type=int, default=500_000)
    ap.add_argument("--start-batch", type=int, default=0)
    ap.add_argument("--max-batches", type=int, default=0, help="0 = all remaining")
    ap.add_argument("--progress-file", default="reports/status/ei-matrix-multicall3-batch-progress.txt")
    args = ap.parse_args()

    if not args.rpc_url:
        print("Need --rpc-url or ETHEREUM_MAINNET_RPC / RPC_URL_1", file=sys.stderr)
        return 2
    # Exactly one mode must be chosen (equality catches both-set and neither-set).
    if args.dry_run == args.execute:
        print("Specify exactly one of --dry-run or --execute", file=sys.stderr)
        return 2

    # Relative paths are resolved against the repo root (two dirs above scripts/lib/).
    repo = Path(__file__).resolve().parents[2]
    grid_path = repo / args.grid if not os.path.isabs(args.grid) else Path(args.grid)
    tsv_path = repo / args.tsv if not os.path.isabs(args.tsv) else Path(args.tsv)

    # Parse TSV rows: '#' starts a comment; tab-separated preferred, falling
    # back to any whitespace; zero/negative amounts are skipped.
    wallets = json.loads(grid_path.read_text(encoding="utf-8"))["wallets"]
    rows: list[tuple[str, int]] = []
    for line in tsv_path.read_text(encoding="utf-8").splitlines():
        line = line.split("#", 1)[0].strip()
        if not line:
            continue
        parts = line.split("\t")
        if len(parts) < 2:
            parts = line.split()
        if len(parts) < 2:
            continue
        idx = int(parts[0])
        amt = int(parts[1])
        if amt <= 0:
            continue
        addr = wallets[idx]["address"]
        rows.append((addr, amt))

    if not rows:
        print("No positive-amount rows in TSV.", file=sys.stderr)
        return 0

    pk = os.environ.get("PRIVATE_KEY", "").strip() or None
    if args.execute and not pk:
        print("PRIVATE_KEY required for --execute", file=sys.stderr)
        return 2

    deployer = _deployer(pk)

    mc = args.multicall
    token = args.token

    # Split transfers into aggregate3 batches of --chunk-size inner calls.
    all_chunks: list[list[tuple[str, int]]] = []
    for i in range(0, len(rows), args.chunk_size):
        all_chunks.append(rows[i : i + args.chunk_size])

    # --start-batch / --max-batches select a window of batches, so an
    # interrupted run can be resumed via the progress file.
    start_b = max(0, args.start_batch)
    if args.max_batches > 0:
        end_b = min(len(all_chunks), start_b + args.max_batches)
    else:
        end_b = len(all_chunks)
    chunks = all_chunks[start_b:end_b]
    # Total raw token amount this run will move; also the allowance target.
    budget_raw = sum(amt for c in chunks for _, amt in c)

    if not chunks:
        print("No batches in range.", file=sys.stderr)
        return 0

    print(
        f"batches {start_b}..{end_b - 1} of {len(all_chunks)} transfers={sum(len(c) for c in chunks)} "
        f"budget_raw={budget_raw}",
        file=sys.stderr,
    )

    if args.dry_run:
        # Dry run: report allowance, print the approve command if needed, and
        # write the first batch's calldata to a .hex file with a ready-to-paste
        # cast send command. Nothing is broadcast.
        try:
            allow = _allowance(token, deployer, mc, args.rpc_url)
        except Exception:
            # Best-effort read; treat RPC failure as zero allowance so the
            # approve command is still printed.
            allow = 0
        print(f"# allowance Multicall3: {allow} budget_this_run: {budget_raw}", file=sys.stderr)
        if allow < budget_raw:
            print(
                f"cast send {token} \"approve(address,uint256)\" {mc} {budget_raw} \\\n"
                f" --rpc-url \"$ETHEREUM_MAINNET_RPC\" --private-key \"$PRIVATE_KEY\" --gas-limit 120000",
                file=sys.stderr,
            )
        chunk = chunks[0]
        parts = []
        for addr, amt in chunk:
            data = _cast_calldata_transfer_from(deployer, addr, amt)
            # (target, allowFailure=false, calldata) — any inner revert aborts the batch.
            parts.append(f"({token},false,{data})")
        tuple_str = "[" + ",".join(parts) + "]"
        calldata = _cast_calldata_aggregate3(tuple_str)
        # Rough per-transfer gas heuristic for the suggested --gas-limit.
        gl = args.min_gas_per_batch + 65_000 * len(chunk)
        sample_hex = repo / "reports/status/ei-matrix-multicall3-dryrun-sample-batch.hex"
        sample_hex.write_text(calldata + "\n", encoding="utf-8")
        rel = os.path.relpath(str(sample_hex), str(repo))
        print(f"\n# sample batch 0 n={len(chunk)} gas_limit~{gl}", file=sys.stderr)
        print(f"# calldata written: {rel}", file=sys.stderr)
        print(
            f"cast send {mc} $(cat {rel}) --rpc-url \"$ETHEREUM_MAINNET_RPC\" \\\n"
            f" --private-key \"$PRIVATE_KEY\" --gas-limit {gl}"
        )
        print(f"\n# … {len(chunks)} batches total (chunk_size={args.chunk_size})", file=sys.stderr)
        return 0

    # Execute path: pk was validated above for --execute.
    assert pk is not None
    allow = _allowance(token, deployer, mc, args.rpc_url)
    if allow < budget_raw:
        # approve() REPLACES the allowance with this run's budget (see module
        # docstring: one approve tx before the batches).
        print(f"Approving Multicall3 for {budget_raw} raw (was {allow})", file=sys.stderr)
        _send_cast_send(token, "approve(address,uint256)", [mc, str(budget_raw)], args.rpc_url, pk, "120000")
        time.sleep(2)
        # Re-read to confirm the approve landed before broadcasting batches.
        allow2 = _allowance(token, deployer, mc, args.rpc_url)
        if allow2 < budget_raw:
            print(f"Allowance insufficient: {allow2} < {budget_raw}", file=sys.stderr)
            return 1

    progress_path = repo / args.progress_file
    progress_path.parent.mkdir(parents=True, exist_ok=True)

    for bi, chunk in enumerate(chunks):
        # Global index so a resumed run can pass it back as --start-batch.
        global_batch_idx = start_b + bi
        parts = []
        for addr, amt in chunk:
            data = _cast_calldata_transfer_from(deployer, addr, amt)
            parts.append(f"({token},false,{data})")
        tuple_str = "[" + ",".join(parts) + "]"
        calldata = _cast_calldata_aggregate3(tuple_str)

        gas_est = args.min_gas_per_batch
        try:
            gas_est = _estimate_gas(deployer, mc, calldata, args.rpc_url)
        except Exception as e:
            # Estimation failure is non-fatal: fall back to a linear heuristic.
            print(f"[warn] estimateGas failed, fallback: {e}", file=sys.stderr)
            gas_est = 70_000 * len(chunk) + 400_000

        # Apply basis-point headroom, rounded up, floored at --min-gas-per-batch.
        gas_with_headroom = max(args.min_gas_per_batch, (gas_est * args.gas_headroom_bps + 9999) // 10000)
        print(f"Batch {global_batch_idx}: n={len(chunk)} estimate={gas_est} limit={gas_with_headroom}", file=sys.stderr)

        _send_raw_calldata(mc, calldata, args.rpc_url, pk, str(gas_with_headroom))
        # Record the last COMPLETED batch index for resume after interruption.
        progress_path.write_text(f"{global_batch_idx}\n", encoding="utf-8")
        time.sleep(1)

    print("Done.", file=sys.stderr)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
299
scripts/lib/ei_matrix_onchain_readiness_audit.py
Normal file
299
scripts/lib/ei_matrix_onchain_readiness_audit.py
Normal file
@@ -0,0 +1,299 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
On-chain readiness audit for EI matrix wallets (config/pmm-soak-wallet-grid.json).
|
||||
|
||||
Queries ERC-20 balanceOf for each address on one or both chains:
|
||||
- Ethereum mainnet cWUSDC (default from env CWUSDC_MAINNET)
|
||||
- Chain 138 cUSDC (default canonical CompliantUSDC)
|
||||
|
||||
Use for strength profiling: segment by class/lpbca via --report-by-class, find gaps vs thresholds.
|
||||
|
||||
Environment (optional defaults for thresholds):
|
||||
EI_MATRIX_AUDIT_MIN_MAINNET_RAW, EI_MATRIX_AUDIT_MIN_138_RAW, EI_MATRIX_AUDIT_WORKERS
|
||||
|
||||
Examples:
|
||||
python3 scripts/lib/ei_matrix_onchain_readiness_audit.py --mainnet-only --min-mainnet-raw 1
|
||||
python3 scripts/lib/ei_matrix_onchain_readiness_audit.py --both \\
|
||||
--shard-size 400 --min-mainnet-raw 12000000 --min-138-raw 0 --workers 3 \\
|
||||
--report-by-class --json-out reports/status/ei-matrix-readiness-audit-latest.json
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from pathlib import Path
|
||||
|
||||
# balanceOf(address) selector
BALANCE_OF = bytes.fromhex("70a08231")
# Addresses are left-padded to 32 bytes in ABI encoding: 12 zero bytes + 20 address bytes.
ADDR_PAD = 12 * b"\x00"


def encode_balance_of_call(addr: str) -> str:
    """Return 0x-prefixed hex calldata for ERC-20 balanceOf(*addr*).

    Raises ValueError when *addr* is not a 20-byte hex address
    (with or without the 0x prefix).
    """
    hex_part = addr.lower().removeprefix("0x")
    if len(hex_part) != 40:
        raise ValueError(f"bad address {addr}")
    return "0x" + (BALANCE_OF + ADDR_PAD + bytes.fromhex(hex_part)).hex()
|
||||
|
||||
|
||||
def rpc_eth_call(to: str, data: str, rpc_url: str, timeout: float = 30.0) -> str:
    """POST a JSON-RPC eth_call against the latest block; return the hex result.

    Returns "0x0" when the node omits/nulls the result field.
    Raises RuntimeError for a JSON-RPC error object; urllib errors propagate.
    """
    payload = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "eth_call",
        "params": [{"to": to, "data": data}, "latest"],
    }
    req = urllib.request.Request(
        rpc_url,
        data=json.dumps(payload).encode(),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req, timeout=timeout) as resp:
        reply = json.loads(resp.read().decode())
    if "error" in reply:
        raise RuntimeError(str(reply["error"]))
    return reply.get("result") or "0x0"
|
||||
|
||||
|
||||
def hex_to_int(h: str) -> int:
    """Parse a hex quantity string; empty and bare "0x" both map to zero."""
    stripped = h.strip()
    return int(stripped, 16) if stripped and stripped != "0x" else 0
|
||||
|
||||
|
||||
def collect_rows_for_slice(
    slice_items: list[tuple[int, dict]],
    *,
    do_main: bool,
    do_138: bool,
    mainnet_rpc: str,
    chain138_rpc: str,
    mainnet_token: str,
    chain138_cusdc: str,
    workers: int,
) -> list[dict]:
    """Fetch token balances for a slice of wallets concurrently.

    Args:
        slice_items: (linearIndex, wallet-dict) pairs; each wallet dict must
            carry "address" and may carry "cellId" and "class".
        do_main / do_138: which chain(s) to query.
        mainnet_rpc / chain138_rpc: JSON-RPC endpoints for each chain.
        mainnet_token / chain138_cusdc: ERC-20 token addresses to query.
        workers: thread-pool size (clamped to at least 1).

    Returns:
        One dict per wallet with balances; a chain that was not queried gets
        None for its balance field. Rows are appended in COMPLETION order,
        not input order — callers sort by "linearIndex" afterwards.

    Any RPC exception propagates out of fut.result() and aborts the slice.
    """
    def fetch_one(item: tuple[int, dict]) -> tuple[int, dict, int, int]:
        # Query balanceOf for one wallet on each enabled chain.
        idx, w = item
        addr = w["address"]
        mbal, bbal = 0, 0
        if do_main:
            calldata = encode_balance_of_call(addr)
            res = rpc_eth_call(mainnet_token.lower(), calldata, mainnet_rpc)
            mbal = hex_to_int(res)
        if do_138:
            calldata = encode_balance_of_call(addr)
            res = rpc_eth_call(chain138_cusdc.lower(), calldata, chain138_rpc)
            bbal = hex_to_int(res)
        return idx, w, mbal, bbal

    rows: list[dict] = []
    with ThreadPoolExecutor(max_workers=max(1, workers)) as ex:
        futs = [ex.submit(fetch_one, it) for it in slice_items]
        for fut in as_completed(futs):
            idx, w, mbal, bbal = fut.result()
            # Missing "class" defaults to 0 (matrix classes are 0..5).
            cls = int(w.get("class", 0))
            row = {
                "linearIndex": idx,
                "address": w["address"],
                "cellId": w.get("cellId"),
                "class": cls,
                "mainnetCwusdcRaw": mbal if do_main else None,
                "chain138CusdcRaw": bbal if do_138 else None,
            }
            rows.append(row)
    return rows
|
||||
|
||||
|
||||
def write_indices(path: Path, indices: list[int]) -> None:
    """Write one linear index per line to *path*, creating parent directories.

    An empty list yields an empty file (no trailing newline); a non-empty list
    ends with a single trailing newline.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    body = "\n".join(str(i) for i in indices)
    if indices:
        body += "\n"
    path.write_text(body, encoding="utf-8")
|
||||
|
||||
|
||||
def main() -> int:
    """Audit EI-matrix wallet balances on mainnet and/or chain 138.

    Reads the wallet grid, queries balanceOf per wallet (optionally sharded
    and threaded), prints a JSON summary to stdout, and optionally writes a
    full JSON report and gap-index files. Returns 1 when any wallet is below
    its minimum threshold, 2 on usage errors, else 0.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--grid", default="config/pmm-soak-wallet-grid.json")
    ap.add_argument("--offset", type=int, default=0)
    ap.add_argument("--limit", type=int, default=0, help="0 = all from offset to grid end")
    ap.add_argument(
        "--shard-size",
        type=int,
        default=int(os.environ.get("EI_MATRIX_AUDIT_SHARD_SIZE", "0")),
        help="If >0, query in sequential shards of this size (eases RPC load). 0 = single batch.",
    )
    ap.add_argument("--workers", type=int, default=int(os.environ.get("EI_MATRIX_AUDIT_WORKERS", "4")))
    ap.add_argument("--mainnet-only", action="store_true")
    ap.add_argument("--chain138-only", action="store_true")
    ap.add_argument("--both", action="store_true")
    ap.add_argument("--mainnet-rpc", default=os.environ.get("ETHEREUM_MAINNET_RPC") or os.environ.get("RPC_URL_1") or "")
    ap.add_argument("--chain138-rpc", default=os.environ.get("RPC_URL_138") or os.environ.get("CHAIN138_PUBLIC_RPC_URL") or "")
    ap.add_argument("--mainnet-token", default=os.environ.get("CWUSDC_MAINNET", "0x2de5F116bFcE3d0f922d9C8351e0c5Fc24b9284a"))
    ap.add_argument(
        "--chain138-cusdc",
        default=os.environ.get("CUSDC_CHAIN138", "0xf22258f57794CC8E06237084b353Ab30fFfa640b"),
    )
    ap.add_argument(
        "--min-mainnet-raw",
        type=int,
        default=int(os.environ.get("EI_MATRIX_AUDIT_MIN_MAINNET_RAW", "0")),
        help="fail wallets strictly below this (mainnet); env EI_MATRIX_AUDIT_MIN_MAINNET_RAW",
    )
    ap.add_argument(
        "--min-138-raw",
        type=int,
        default=int(os.environ.get("EI_MATRIX_AUDIT_MIN_138_RAW", "0")),
        help="fail wallets strictly below this (138); env EI_MATRIX_AUDIT_MIN_138_RAW",
    )
    ap.add_argument("--report-by-class", action="store_true", help="aggregate counts by matrix class 0..5")
    ap.add_argument("--json-out", default="", help="write full per-wallet rows + summary")
    ap.add_argument(
        "--gaps-mainnet-out",
        default="",
        help="write newline-separated linear indices below mainnet minimum (only if mainnet queried)",
    )
    ap.add_argument(
        "--gaps-138-out",
        default="",
        help="write newline-separated linear indices below 138 minimum (only if 138 queried)",
    )
    ap.add_argument("--max-list", type=int, default=200, help="max gap indices to print on stderr")
    args = ap.parse_args()

    # Relative paths resolve against the repo root (two dirs above scripts/lib/).
    repo = Path(__file__).resolve().parents[2]
    grid_path = repo / args.grid if not os.path.isabs(args.grid) else Path(args.grid)
    data = json.loads(grid_path.read_text(encoding="utf-8"))
    wallets: list[dict] = data["wallets"]
    n = len(wallets)
    # [scan_start, scan_end) is the half-open wallet range to audit.
    scan_end = n if args.limit <= 0 else min(n, args.offset + args.limit)
    scan_start = args.offset
    if scan_start < 0 or scan_start > n:
        print("Invalid --offset", file=sys.stderr)
        return 2
    if scan_end < scan_start:
        print("Invalid --limit / range", file=sys.stderr)
        return 2

    # Chain selection: at least one of the three mode flags must be given, and
    # each selected chain needs an RPC endpoint.
    do_main = args.mainnet_only or args.both
    do_138 = args.chain138_only or args.both
    if not do_main and not do_138:
        print("Specify --mainnet-only, --chain138-only, or --both", file=sys.stderr)
        return 2
    if do_main and not args.mainnet_rpc:
        print("Need --mainnet-rpc or ETHEREUM_MAINNET_RPC / RPC_URL_1", file=sys.stderr)
        return 2
    if do_138 and not args.chain138_rpc:
        print("Need --chain138-rpc or RPC_URL_138", file=sys.stderr)
        return 2

    shard = max(0, args.shard_size)
    rows: list[dict] = []
    if shard <= 0:
        # Single batch: one thread-pool pass over the whole range.
        slice_items = list(enumerate(wallets[scan_start:scan_end], start=scan_start))
        rows = collect_rows_for_slice(
            slice_items,
            do_main=do_main,
            do_138=do_138,
            mainnet_rpc=args.mainnet_rpc,
            chain138_rpc=args.chain138_rpc,
            mainnet_token=args.mainnet_token,
            chain138_cusdc=args.chain138_cusdc,
            workers=args.workers,
        )
    else:
        # Sequential shards cap concurrent RPC pressure on public endpoints.
        for start in range(scan_start, scan_end, shard):
            chunk_end = min(scan_end, start + shard)
            slice_items = list(enumerate(wallets[start:chunk_end], start=start))
            print(f"Shard {start}..{chunk_end} ({len(slice_items)} wallets)", file=sys.stderr)
            rows.extend(
                collect_rows_for_slice(
                    slice_items,
                    do_main=do_main,
                    do_138=do_138,
                    mainnet_rpc=args.mainnet_rpc,
                    chain138_rpc=args.chain138_rpc,
                    mainnet_token=args.mainnet_token,
                    chain138_cusdc=args.chain138_cusdc,
                    workers=args.workers,
                )
            )

    # Threads complete out of order; restore grid order for stable output.
    rows.sort(key=lambda r: r["linearIndex"])

    # Per-class counts; classes outside 0..5 are silently skipped below.
    by_class: dict[int, dict] = {i: {"n": 0, "mainnet_below": 0, "138_below": 0} for i in range(6)}
    if args.report_by_class:
        for r in rows:
            cls = int(r.get("class", 0))
            if cls not in by_class:
                continue
            by_class[cls]["n"] += 1
            if do_main and r["mainnetCwusdcRaw"] < args.min_mainnet_raw:
                by_class[cls]["mainnet_below"] += 1
            if do_138 and r["chain138CusdcRaw"] < args.min_138_raw:
                by_class[cls]["138_below"] += 1

    # Collect linear indices of wallets strictly below their threshold.
    gaps_main: list[int] = []
    gaps_138: list[int] = []
    for r in rows:
        if do_main and r["mainnetCwusdcRaw"] < args.min_mainnet_raw:
            gaps_main.append(r["linearIndex"])
        if do_138 and r["chain138CusdcRaw"] < args.min_138_raw:
            gaps_138.append(r["linearIndex"])

    summary = {
        "gridPath": str(grid_path),
        "slice": {"offset": scan_start, "endExclusive": scan_end, "count": len(rows)},
        "shardSize": shard if shard > 0 else None,
        "mainnet": {
            "token": args.mainnet_token if do_main else None,
            # Truncate long RPC URLs so API keys in query strings are not dumped whole.
            "rpc": args.mainnet_rpc[:48] + "…" if do_main and len(args.mainnet_rpc) > 48 else args.mainnet_rpc,
            "minRaw": args.min_mainnet_raw,
            "belowMin": len(gaps_main),
        },
        "chain138": {
            "token": args.chain138_cusdc if do_138 else None,
            "minRaw": args.min_138_raw,
            "belowMin": len(gaps_138),
        },
        "byClass": by_class if args.report_by_class else None,
    }

    # Machine-readable summary goes to stdout; everything else to stderr.
    print(json.dumps(summary, indent=2))
    if gaps_main:
        print(
            f"\nMainnet cWUSDC below min ({args.min_mainnet_raw}) — {len(gaps_main)} wallets "
            f"(first {args.max_list} indices):",
            file=sys.stderr,
        )
        print(", ".join(str(x) for x in gaps_main[: args.max_list]), file=sys.stderr)
    if gaps_138:
        print(
            f"\nChain 138 cUSDC below min ({args.min_138_raw}) — {len(gaps_138)} wallets "
            f"(first {args.max_list} indices):",
            file=sys.stderr,
        )
        print(", ".join(str(x) for x in gaps_138[: args.max_list]), file=sys.stderr)

    if args.json_out:
        outp = repo / args.json_out if not os.path.isabs(args.json_out) else Path(args.json_out)
        outp.parent.mkdir(parents=True, exist_ok=True)
        outp.write_text(json.dumps({"summary": summary, "rows": rows}, indent=2), encoding="utf-8")
        print(f"\nWrote {outp}", file=sys.stderr)

    # Gap-index files feed the top-up tooling (one linearIndex per line).
    if do_main and args.gaps_mainnet_out:
        gp = repo / args.gaps_mainnet_out if not os.path.isabs(args.gaps_mainnet_out) else Path(args.gaps_mainnet_out)
        write_indices(gp, gaps_main)
        print(f"Wrote mainnet gap indices ({len(gaps_main)}): {gp}", file=sys.stderr)
    if do_138 and args.gaps_138_out:
        gp = repo / args.gaps_138_out if not os.path.isabs(args.gaps_138_out) else Path(args.gaps_138_out)
        write_indices(gp, gaps_138)
        print(f"Wrote 138 gap indices ({len(gaps_138)}): {gp}", file=sys.stderr)

    # Non-zero exit when any wallet is under threshold, so CI/cron can alert.
    fail = bool(gaps_main or gaps_138)
    return 1 if fail else 0


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
22
scripts/lib/find-repo-files.sh
Executable file
22
scripts/lib/find-repo-files.sh
Executable file
@@ -0,0 +1,22 @@
|
||||
#!/usr/bin/env bash
# List files under the repo root without descending into node_modules or .git
# (avoids hanging on huge dependency trees).
#
# Usage:
#   scripts/lib/find-repo-files.sh
#   scripts/lib/find-repo-files.sh -name '*.md'
#   scripts/lib/find-repo-files.sh \( -name '*.ts' -o -name '*.tsx' \)
#
# Example — search text without scanning node_modules (prefer narrowing extensions;
# piping every file to grep can still be slow on very large trees):
#   scripts/lib/find-repo-files.sh -name '*.md' | xargs grep -l 'pattern' 2>/dev/null
#   scripts/lib/find-repo-files.sh \( -name '*.md' -o -name '*.sh' -o -name '*.ts' -o -name '*.json' \) \\
#     | xargs grep -l 'pattern' 2>/dev/null

set -euo pipefail
# Repo root: two levels above this script (scripts/lib/ -> repo root).
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"

# shellcheck disable=SC2086
# -prune stops find from descending into matched directories entirely, which is
# what makes this fast. ${1+"$@"} splices caller-supplied find predicates only
# when arguments were given — expands to nothing (not an empty string) when the
# script is called bare, which stays safe under `set -u` on older bash.
exec find "$ROOT" \
  \( -name node_modules -o -name .git \) -prune -o \
  -type f ${1+"$@"} -print
|
||||
@@ -172,12 +172,13 @@ export DBIS_CORE_DIR="${DBIS_CORE_DIR:-${PROJECT_ROOT}/dbis_core}"
|
||||
get_host_for_vmid() {
|
||||
local vmid="$1"
|
||||
case "$vmid" in
|
||||
7800|7801|7802|7803|7804|7805|7806) echo "${PROXMOX_HOST_R630_01}";;
|
||||
7800|7801|7802|7803|7805|7806) echo "${PROXMOX_HOST_R630_01}";;
|
||||
7804) echo "${PROXMOX_HOST_R630_04:-192.168.11.14}";;
|
||||
10130|10150|10151|106|107|108|10000|10001|10020|10100|10101|10120|10203|10233|10235) echo "${PROXMOX_HOST_R630_01}";;
|
||||
1000|1001|1002|1500|1501|1502|2101|2103) echo "${PROXMOX_HOST_R630_01}";;
|
||||
1003|1004|1503|1504|1505|1506|1507|1509|1510|2102|2301|2304|2400|2402|2403) echo "${PROXMOX_HOST_R630_03}";;
|
||||
1508) echo "${PROXMOX_HOST_R630_04}";;
|
||||
5700) echo "${PROXMOX_HOST_R630_04}";;
|
||||
5700|5701) echo "${PROXMOX_HOST_R630_04:-192.168.11.14}";;
|
||||
5000|7810|2201|2303|2305|2306|2307|2308|2401|6200|6201|6202|6203|6204|6205|10234|10237|5800|5801) echo "${PROXMOX_HOST_R630_02}";;
|
||||
2420|2430|2440|2460|2470|2480) echo "${PROXMOX_HOST_R630_01}";;
|
||||
5400|5401|5402|5403|5410|5411|5412|5413|5414|5415|5416|5417|5418|5419|5420|5421|5422|5423|5424|5425|5440|5441|5442|5443|5444|5445|5446|5447|5448|5449|5450|5451|5452|5453|5454|5455|5470|5471|5472|5473|5474|5475|5476) echo "${PROXMOX_HOST_R630_02}";;
|
||||
|
||||
94
scripts/lib/mev-protection.sh
Executable file
94
scripts/lib/mev-protection.sh
Executable file
@@ -0,0 +1,94 @@
|
||||
#!/usr/bin/env bash
|
||||
# Shared protected-broadcast helpers for Engine X Mainnet actions.
|
||||
#
|
||||
# Source after scripts/lib/load-project-env.sh. Reads use the normal public RPC;
|
||||
# sensitive writes should go through mev_cast_send so operators cannot
|
||||
# accidentally broadcast quote-defense swaps through the public mempool.
|
||||
|
||||
mev_private_rpc_key() {
  # Echo the NAME of the first configured private/protected RPC env var,
  # checked in priority order; return 1 when none is set.
  local candidate
  for candidate in ENGINE_X_PRIVATE_TX_RPC MEV_BLOCKER_RPC_URL FLASHBOTS_RPC_URL BLOXROUTE_RPC_URL BLINK_RPC_URL; do
    if [[ -n "${!candidate-}" ]]; then
      printf '%s\n' "${candidate}"
      return 0
    fi
  done
  return 1
}
|
||||
|
||||
mev_has_private_rpc() {
  # Exit 0 iff some private/protected RPC env var is configured (output discarded).
  mev_private_rpc_key >/dev/null 2>&1
}
|
||||
|
||||
mev_write_rpc_label() {
  # Echo a short human-readable label for the RPC that writes would use.
  # Falls back to "public-mainnet-rpc" when no protected endpoint is set.
  local key
  if ! key="$(mev_private_rpc_key)"; then
    printf '%s\n' "public-mainnet-rpc"
    return 0
  fi
  case "${key}" in
    ENGINE_X_PRIVATE_TX_RPC) printf '%s\n' "${ENGINE_X_PRIVATE_TX_RPC_LABEL:-engine-x-private-tx-rpc}" ;;
    MEV_BLOCKER_RPC_URL) printf '%s\n' "mev-blocker" ;;
    FLASHBOTS_RPC_URL) printf '%s\n' "flashbots" ;;
    BLOXROUTE_RPC_URL) printf '%s\n' "bloxroute" ;;
    BLINK_RPC_URL) printf '%s\n' "blink" ;;
    *) printf '%s\n' "${key}" ;;
  esac
  return 0
}
|
||||
|
||||
mev_write_rpc_url() {
  # Echo the RPC URL to use for WRITES, preferring a private/protected
  # endpoint. Falls back to the public ETHEREUM_MAINNET_RPC only when MEV
  # protection is disabled or public broadcast is explicitly allowed;
  # otherwise (or when ETHEREUM_MAINNET_RPC is unset) returns 1.
  local key
  if key="$(mev_private_rpc_key)"; then
    # ${!key} — indirect expansion: the value of the env var whose name is $key.
    printf '%s\n' "${!key}"
    return 0
  fi

  # Protection on (default) and no explicit public-broadcast override: refuse.
  if [[ "${ENGINE_X_MEV_PROTECTION:-1}" == "1" && "${ENGINE_X_ALLOW_PUBLIC_BROADCAST:-0}" != "1" ]]; then
    return 1
  fi

  if [[ -z "${ETHEREUM_MAINNET_RPC:-}" ]]; then
    return 1
  fi
  printf '%s\n' "${ETHEREUM_MAINNET_RPC}"
}
|
||||
|
||||
mev_require_private_for_action() {
  # Gate a sensitive action on the MEV-protection policy. Returns 0 (proceed)
  # when a protected RPC is configured, protection is disabled, or public
  # broadcast is explicitly allowed; otherwise prints remediation guidance and
  # returns 1. $1 names the action for log messages.
  local action="${1:-engine-x-sensitive-action}"
  if [[ "${ENGINE_X_MEV_PROTECTION:-1}" != "1" ]]; then
    echo "WARN: MEV protection disabled for ${action} (ENGINE_X_MEV_PROTECTION=0)." >&2
    return 0
  fi
  if mev_has_private_rpc; then
    return 0
  fi
  if [[ "${ENGINE_X_ALLOW_PUBLIC_BROADCAST:-0}" == "1" ]]; then
    echo "WARN: public broadcast explicitly allowed for ${action} (ENGINE_X_ALLOW_PUBLIC_BROADCAST=1)." >&2
    return 0
  fi

  # No protected RPC, protection on, no override: explain how to fix and fail.
  cat >&2 <<EOF
MEV protected broadcast is required for ${action}, but no private/protected RPC is configured.
Set one of:
  ENGINE_X_PRIVATE_TX_RPC
  MEV_BLOCKER_RPC_URL
  FLASHBOTS_RPC_URL
  BLOXROUTE_RPC_URL
  BLINK_RPC_URL

For an intentional public-mempool canary only, set ENGINE_X_ALLOW_PUBLIC_BROADCAST=1.
EOF
  return 1
}
|
||||
|
||||
mev_cast_send() {
  # `cast send` wrapper that routes the write through mev_write_rpc_url.
  # Usage: mev_cast_send <target> [cast-send args...]
  # Requires PRIVATE_KEY; fails (return 1) when no acceptable write RPC exists.
  local target="${1:?target is required}"
  shift
  local rpc
  if ! rpc="$(mev_write_rpc_url)"; then
    echo "Unable to choose a write RPC; protected RPC required or ETHEREUM_MAINNET_RPC missing." >&2
    return 1
  fi
  cast send "${target}" "$@" --private-key "${PRIVATE_KEY:?PRIVATE_KEY is required}" --rpc-url "${rpc}"
}
|
||||
59
scripts/lib/require-proxmox-ssh-for-pct.sh
Normal file
59
scripts/lib/require-proxmox-ssh-for-pct.sh
Normal file
@@ -0,0 +1,59 @@
|
||||
#!/usr/bin/env bash
|
||||
# Resolve PROXMOX_HOST for pct-over-SSH so operator workstations do not run pct by mistake.
|
||||
#
|
||||
# Usage (after VMID is set):
|
||||
# PROXMOX_MONOREPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" # proxmox repo root
|
||||
# source "${PROXMOX_MONOREPO_ROOT}/scripts/lib/require-proxmox-ssh-for-pct.sh"
|
||||
# require_proxmox_ssh_for_pct
|
||||
#
|
||||
# Env:
|
||||
# PROXMOX_HOST If set, use this host (ssh root@$PROXMOX_HOST … pct …).
|
||||
# VMID Used with get_host_for_vmid when PROXMOX_HOST is unset.
|
||||
# PROXMOX_MONOREPO_ROOT Proxmox monorepo root (directory containing scripts/lib/load-project-env.sh).
|
||||
# PROJECT_ROOT Alternative to PROXMOX_MONOREPO_ROOT when sourcing load-project-env.
|
||||
# DEPLOY_PCT_ON_LOCAL_PVE Set to 1 only on a real Proxmox node (/etc/pve/.members) to run local pct
|
||||
# without SSH (hypervisor shell only).
|
||||
|
||||
require_proxmox_ssh_for_pct() {
  # Resolve and export PROXMOX_HOST so pct commands run over SSH on a real
  # Proxmox node, never on an operator workstation. Resolution order:
  #   1. DEPLOY_PCT_ON_LOCAL_PVE=1 — local pct, only on a verified PVE node.
  #   2. PROXMOX_HOST already set — use it as-is.
  #   3. Map VMID to a host via get_host_for_vmid from load-project-env.sh.
  # Returns non-zero (with an ERROR message) when no host can be chosen.
  local vmid="${VMID:-}"

  if [[ "${DEPLOY_PCT_ON_LOCAL_PVE:-0}" == "1" ]]; then
    # /etc/pve/.members only exists on a Proxmox cluster member; this check
    # keeps the local-pct escape hatch from working on a workstation.
    if [[ -r /etc/pve/.members ]]; then
      export PROXMOX_HOST=""
      echo "NOTE: DEPLOY_PCT_ON_LOCAL_PVE=1 — using pct on this Proxmox node (no SSH)." >&2
      return 0
    fi
    echo "ERROR: DEPLOY_PCT_ON_LOCAL_PVE=1 but this host is not a Proxmox cluster member (/etc/pve/.members missing)." >&2
    return 1
  fi

  if [[ -n "${PROXMOX_HOST:-}" ]]; then
    echo "Using Proxmox target: ssh root@${PROXMOX_HOST} (VMID ${vmid:-n/a})" >&2
    return 0
  fi

  if [[ -z "$vmid" ]]; then
    echo "ERROR: PROXMOX_HOST is unset and VMID is empty — cannot choose a Proxmox host." >&2
    echo "       Set PROXMOX_HOST (e.g. 192.168.11.12) or VMID, or run on a PVE node with DEPLOY_PCT_ON_LOCAL_PVE=1." >&2
    return 1
  fi

  # Locate the monorepo root that provides get_host_for_vmid.
  local root="${PROXMOX_MONOREPO_ROOT:-${PROJECT_ROOT:-}}"
  if [[ -z "$root" || ! -f "$root/scripts/lib/load-project-env.sh" ]]; then
    echo "ERROR: Proxmox monorepo root not found (expected scripts/lib/load-project-env.sh under PROXMOX_MONOREPO_ROOT or PROJECT_ROOT)." >&2
    return 1
  fi

  # shellcheck disable=SC1090
  PROJECT_ROOT="$root" source "$root/scripts/lib/load-project-env.sh"

  local chosen
  chosen="$(get_host_for_vmid "$vmid")"
  if [[ -z "$chosen" ]]; then
    echo "ERROR: get_host_for_vmid returned empty for VMID=$vmid" >&2
    return 1
  fi
  export PROXMOX_HOST="$chosen"
  echo "Auto-selected Proxmox host from VMID ${vmid}: ssh root@${PROXMOX_HOST}" >&2
  return 0
}
|
||||
3
scripts/lib/requirements-solana-ops.txt
Normal file
3
scripts/lib/requirements-solana-ops.txt
Normal file
@@ -0,0 +1,3 @@
# Used by scripts/deployment/solana-transfer-native.py (sign + serialize only).
# RPC calls use stdlib in scripts/lib/solana_jsonrpc.py (avoids solana-py sendTransaction parse panics on some hosts).
solders>=0.21.0,<0.26
188
scripts/lib/solana_jsonrpc.py
Normal file
188
scripts/lib/solana_jsonrpc.py
Normal file
@@ -0,0 +1,188 @@
"""
Minimal Solana JSON-RPC over HTTP (stdlib only).

Some public RPCs return a bare string for ``sendTransaction`` ``result`` without
extra fields that ``solana-py``'s ``SendTransactionResp`` expects, which makes
``Client.send_raw_transaction`` panic while deserializing (missing JSON field
``data``). Use :func:`send_transaction_wire` for submission; keep ``solders``
(or ``solana-py``) only for signing and local serialization.
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
from typing import Any
|
||||
|
||||
|
||||
DEFAULT_USER_AGENT = "proxmox-scripts/solana-jsonrpc/1.0"
|
||||
|
||||
|
||||
class SolanaJsonRpcError(RuntimeError):
    """Raised for JSON-RPC ``error`` responses and HTTP/parse failures.

    The optional *payload* keeps the full decoded RPC response (when one was
    available) so callers can inspect the server-side error object.
    """

    def __init__(self, message: str, *, payload: dict[str, Any] | None = None) -> None:
        # Stash the raw response for diagnostics, then delegate the
        # human-readable message to RuntimeError.
        self.payload = payload
        super().__init__(message)
|
||||
|
||||
|
||||
def post_json_rpc(
    rpc_url: str,
    method: str,
    params: list[Any],
    *,
    request_id: int = 1,
    timeout_s: float = 90.0,
    user_agent: str = DEFAULT_USER_AGENT,
) -> dict[str, Any]:
    """POST one JSON-RPC 2.0 request to *rpc_url* and return the decoded response.

    Raises :class:`SolanaJsonRpcError` on HTTP failures, undecodable bodies,
    or a JSON-RPC ``error`` member in the response.
    """
    envelope = {"jsonrpc": "2.0", "id": request_id, "method": method, "params": params}
    request = urllib.request.Request(
        rpc_url,
        data=json.dumps(envelope).encode("utf-8"),
        headers={"Content-Type": "application/json", "User-Agent": user_agent},
        method="POST",
    )

    try:
        with urllib.request.urlopen(request, timeout=timeout_s) as response:
            text = response.read().decode("utf-8")
    except urllib.error.HTTPError as e:
        # Prefer the server's error body for the message; fall back to the
        # exception's own text if the body cannot be read.
        try:
            detail = e.read().decode("utf-8", errors="replace")
        except Exception:
            detail = str(e)
        raise SolanaJsonRpcError(f"HTTP {e.code}: {detail}") from e

    try:
        decoded: dict[str, Any] = json.loads(text)
    except json.JSONDecodeError as e:
        raise SolanaJsonRpcError(f"invalid JSON from RPC: {text[:500]!r}") from e

    rpc_error = decoded.get("error")
    if rpc_error:
        # Keep the full response attached so callers can inspect error codes.
        raise SolanaJsonRpcError(f"RPC error: {rpc_error}", payload=decoded)
    return decoded
|
||||
|
||||
|
||||
def get_latest_blockhash(
    rpc_url: str, *, commitment: str = "confirmed", timeout_s: float = 30.0
) -> str:
    """Return the current blockhash (base58 string) at *commitment*."""
    response = post_json_rpc(
        rpc_url,
        "getLatestBlockhash",
        [{"commitment": commitment}],
        timeout_s=timeout_s,
    )
    try:
        # Expected shape: {"result": {"value": {"blockhash": "..."}}}.
        return str(response["result"]["value"]["blockhash"])
    except (KeyError, TypeError) as e:
        raise SolanaJsonRpcError(f"unexpected getLatestBlockhash shape: {response!r}") from e
|
||||
|
||||
|
||||
def get_balance_lamports(
    rpc_url: str,
    pubkey_b58: str,
    *,
    commitment: str = "confirmed",
    timeout_s: float = 90.0,
) -> int:
    """Return the balance of *pubkey_b58* in lamports.

    Args:
        rpc_url: JSON-RPC endpoint URL.
        pubkey_b58: Account public key, base58-encoded.
        commitment: Commitment level for the query.
        timeout_s: HTTP timeout in seconds. New keyword for consistency with
            :func:`get_latest_blockhash`; the default matches the 90 s this
            call effectively used before (``post_json_rpc``'s default), so
            existing callers see no behavior change.

    Raises:
        SolanaJsonRpcError: On transport errors, an RPC ``error`` response,
            or an unexpected response shape.
    """
    out = post_json_rpc(
        rpc_url,
        "getBalance",
        [pubkey_b58, {"commitment": commitment}],
        timeout_s=timeout_s,
    )
    try:
        # Expected shape: {"result": {"value": <int lamports>}}.
        return int(out["result"]["value"])
    except (KeyError, TypeError, ValueError) as e:
        raise SolanaJsonRpcError(f"unexpected getBalance shape: {out!r}") from e
|
||||
|
||||
|
||||
def send_transaction_wire(
    rpc_url: str,
    signed_wire: bytes,
    *,
    skip_preflight: bool = False,
    preflight_commitment: str = "confirmed",
    max_retries: int | None = None,
    timeout_s: float = 90.0,
) -> str:
    """
    Submit a fully signed legacy or versioned transaction (wire bytes).

    Returns base58 transaction signature string from ``result``.
    """
    options: dict[str, Any] = {
        "encoding": "base64",
        "skipPreflight": skip_preflight,
        "preflightCommitment": preflight_commitment,
    }
    # Only include maxRetries when the caller asked for it; some RPCs treat
    # an explicit value differently from an absent key.
    if max_retries is not None:
        options["maxRetries"] = max_retries

    encoded_tx = base64.b64encode(signed_wire).decode("ascii")
    response = post_json_rpc(
        rpc_url, "sendTransaction", [encoded_tx, options], timeout_s=timeout_s
    )
    signature = response.get("result")
    if not isinstance(signature, str):
        raise SolanaJsonRpcError(f"unexpected sendTransaction result: {response!r}")
    return signature
|
||||
|
||||
|
||||
def get_signature_statuses(
    rpc_url: str,
    signatures: list[str],
    *,
    search_transaction_history: bool = False,
) -> list[dict[str, Any] | None]:
    """Return one status object (or ``None``) per signature, preserving input order."""
    params: list[Any] = [signatures]
    if search_transaction_history:
        params.append({"searchTransactionHistory": True})
    response = post_json_rpc(rpc_url, "getSignatureStatuses", params)
    try:
        entries = response["result"]["value"]
    except (KeyError, TypeError) as e:
        raise SolanaJsonRpcError(
            f"unexpected getSignatureStatuses shape: {response!r}"
        ) from e
    if not isinstance(entries, list):
        raise SolanaJsonRpcError(f"unexpected getSignatureStatuses value: {entries!r}")

    statuses: list[dict[str, Any] | None] = []
    for entry in entries:
        # Each entry must be either null (unknown signature) or a status dict.
        if entry is not None and not isinstance(entry, dict):
            raise SolanaJsonRpcError(f"unexpected status entry: {entry!r}")
        statuses.append(entry)
    return statuses
|
||||
|
||||
|
||||
def wait_until_signature_confirmed(
    rpc_url: str,
    signature: str,
    *,
    timeout_s: float = 90.0,
    poll_interval_s: float = 1.0,
) -> dict[str, Any]:
    """
    Poll ``getSignatureStatuses`` until the signature has a terminal ``err`` or
    reaches ``confirmationStatus`` of ``confirmed`` / ``finalized``.

    Returns the final status dict; raises :class:`SolanaJsonRpcError` on a
    transaction ``err`` or when the deadline passes without confirmation.
    """
    deadline = time.monotonic() + timeout_s
    latest: dict[str, Any] | None = None
    while time.monotonic() < deadline:
        batch = get_signature_statuses(rpc_url, [signature])
        status = batch[0] if batch else None
        latest = status
        if status is not None:
            failure = status.get("err")
            if failure:
                raise SolanaJsonRpcError(
                    f"transaction failed: err={failure!r}", payload=status
                )
            if status.get("confirmationStatus") in ("confirmed", "finalized"):
                return status
        # Unknown or still-processing signature: wait and poll again.
        time.sleep(poll_interval_s)
    raise SolanaJsonRpcError(
        f"timeout waiting for confirmation of {signature!r}; last={latest!r}"
    )
|
||||
Reference in New Issue
Block a user