From 1f44a50a258d618fcfe99f503412c3f8de3511e6 Mon Sep 17 00:00:00 2001 From: defiQUG Date: Mon, 11 May 2026 10:22:59 -0700 Subject: [PATCH] feat(it-ops): cluster live inventory + QEMU ipconfig LAN IPs Add scripts/it-ops export pipeline (collect_inventory_remote, compute_ipam_drift) and proxmox_guest_lan_ips parser for ipconfig* and all net* interfaces. Reconcile ALL_VMIDS, ip-addresses.conf, and operational template with live VMID/IP data; Order portal env vars; DBIS node matrix; inventory helpers. Track latest reports/status/live_inventory.json and drift.json (137 guests, no duplicate LAN IPs). Document export in AGENTS.md. Co-authored-by: Cursor --- AGENTS.md | 14 +- config/ip-addresses.conf | 4 + config/proxmox-operational-template.json | 44 +- docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md | 8 +- ...XMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md | 8 +- docs/04-configuration/ALL_VMIDS_ENDPOINTS.md | 120 +- reports/status/drift.json | 156 ++ reports/status/live_inventory.json | 1380 +++++++++++++++++ scripts/comprehensive-proxmox-inventory.py | 21 +- scripts/it-ops/compute_ipam_drift.py | 250 +++ .../it-ops/export-live-inventory-and-drift.sh | 72 + .../it-ops/lib/collect_inventory_remote.py | 162 ++ scripts/lib/proxmox_guest_lan_ips.py | 113 ++ ...-containers-privileged-and-complete-all.sh | 6 +- 14 files changed, 2284 insertions(+), 74 deletions(-) create mode 100644 reports/status/drift.json create mode 100644 reports/status/live_inventory.json create mode 100755 scripts/it-ops/compute_ipam_drift.py create mode 100755 scripts/it-ops/export-live-inventory-and-drift.sh create mode 100755 scripts/it-ops/lib/collect_inventory_remote.py create mode 100644 scripts/lib/proxmox_guest_lan_ips.py diff --git a/AGENTS.md b/AGENTS.md index cbc3fc7a..9c47b447 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -11,6 +11,10 @@ Orchestration for Proxmox VE, Chain 138 (`smom-dbis-138/`), explorers, NPMplus, | Need | Location | |------|-----------| | Doc index | `docs/MASTER_INDEX.md` | +| Master reference — token / stablecoin launch (“Bible from Nathan”) | `docs/00-meta/BIBLE_FROM_NATHAN_TOKEN_LAUNCH_RESOURCE_COMPENDIUM.md` — cross-cutting institutional compendium (regulation, custody, banking, tooling, audits, listings, checklists); use with Chain 138 canonicals below | +| Master reference — MetaMask Money/mUSD ↔ GRU, provider cross-links, DefiLlama DODO `dfio_meta_main` TVL | `docs/00-meta/METAMASK_GRU_DEFILLAMA_CHAIN138_MASTER_REFERENCE.md` — replay steps, doc cross-links, DefiLlama-Adapters fork/PR **#19198**, touchpoints JSON maintenance | +| DefiLlama ↔ Chain 138 (TVL + optional metrics) | `docs/04-configuration/defillama/CHAIN138_DEFILLAMA_ECOSYSTEM_MAP.md`, `config/defillama-chain138-touchpoints.json`; methodology hub [docs.llama.fi](https://docs.llama.fi/) | +| Optional Cosmos / IBC / Noble / Osmosis / CosmWasm → Chain 138 | `docs/11-references/COSMOS_ECOSYSTEM_CHAIN138_OPTIONAL_INTEGRATIONS_RUNBOOK.md` (streams A–E); templates `config/cosmos-chain138-optional/`; **gaps audit** `docs/11-references/COSMOS_CHAIN138_GAPS_AND_INCONSISTENCIES.md` | | Canonical ecosystem master plan | `docs/02-architecture/DBIS_ECOSYSTEM_TECHNICAL_MASTER_PLAN.md` — umbrella root; subordinate roots: `dbis_chain_138_technical_master_plan.md`, `docs/03-deployment/DBIS_RTGS_MASTER_PLAN_IMPLEMENTATION_TRACKER.md`, `docs/04-configuration/universal-resource-activation/URA_MANIFEST_AUTOMATION_IMPLEMENTATION_TRACKER.md` | | Treasury / EMI / wallet / VA master plan | `docs/02-architecture/GOVERNMENT_TREASURY_EMI_WALLET_MASTER_PLAN.md` — 
government treasury, EMIs, digital wallets, virtual accounts (incl. Tatum-style), Rail vs RTGS gates | | Universal resource activation (manifest, CI, Phoenix) | `UNIVERSAL_RESOURCE_WIRING.md`, `URA_MANIFEST_AUTOMATION_IMPLEMENTATION_TRACKER.md`, `URA_OPERATIONAL_READINESS_CHECKLIST.md` (under `docs/04-configuration/universal-resource-activation/`); `config/universal-resource-activation/{manifest.json,policy-profiles.json,integration/}`; `pnpm ura:ops-readiness` / `ura:ops-readiness:full`, `ura:production-ready` / `ura:production-ready:connectivity`, `ura:validate`, `ura:validate-profiles`, `ura:merge-manifest`, `ura:validate-ledger-mapping`, `ura:writer:ledger`, `ura:writer:settlement`, `ura:profile-hash`, `ura:validate-closure`, `ura:keccak`, `ura:smoke`; `URA_STRICT_CLOSURE` / Gitea `vars.URA_STRICT_CLOSURE`; `smom-dbis-138/contracts/universal-resource/PolicyProfileRegistry.sol` (scoped forge test); Phoenix `PUBLIC_V1_NO_PARTNER_KEY_PATHS` | @@ -18,6 +22,7 @@ Orchestration for Proxmox VE, Chain 138 (`smom-dbis-138/`), explorers, NPMplus, | cXAUC/cXAUT unit | 1 full token = 1 troy oz Au — `docs/11-references/EXPLORER_TOKEN_LIST_CROSSCHECK.md` (section 5.1) | | PMM mesh 6s tick | `smom-dbis-138/scripts/reserve/pmm-mesh-6s-automation.sh` — `docs/integration/ORACLE_AND_KEEPER_CHAIN138.md` (PMM mesh automation) | | VMID / IP / FQDN | `docs/04-configuration/ALL_VMIDS_ENDPOINTS.md` | +| Live guest inventory + IPAM drift (LAN, seed **r630-01**) | `bash scripts/it-ops/export-live-inventory-and-drift.sh` → `reports/status/live_inventory.json`, `reports/status/drift.json` (exit **2** only on duplicate guest IPs). Collector parses QEMU **`ipconfig*`** and LXC **`net*`** via `scripts/lib/proxmox_guest_lan_ips.py`. | | Ops template + JSON | `docs/03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md`, `config/proxmox-operational-template.json` | | Live vs template (read-only SSH) | `bash scripts/verify/audit-proxmox-operational-template.sh` | | Config validation | `bash scripts/validation/validate-config-files.sh` | @@ -28,18 +33,25 @@ Orchestration for Proxmox VE, Chain 138 (`smom-dbis-138/`), explorers, NPMplus, | Submodule + explorer remotes | `docs/00-meta/SUBMODULE_HYGIENE.md` — `mcp-proxmox` uses **Gitea** `https://gitea.d-bis.org/d-bis/mcp-proxmox.git` (not the old GitHub-only URL). `cross-chain-pmm-lps-publish` is a **worktree** of `cross-chain-pmm-lps`, not a submodule. | | smom-dbis-138 `.env` in bash scripts | Prefer `source smom-dbis-138/scripts/lib/deployment/dotenv.sh` + `load_deployment_env --repo-root "$PROJECT_ROOT"` (trims RPC URL line endings). From an interactive shell: `source smom-dbis-138/scripts/load-env.sh`. Proxmox root scripts: `source scripts/lib/load-project-env.sh` (also trims common RPC vars). | | Sankofa portal → CT 7801 (build + restart) | `./scripts/deployment/sync-sankofa-portal-7801.sh` (`--dry-run` first); sets `NEXTAUTH_URL` on CT via `sankofa-portal-ensure-nextauth-on-ct.sh` | +| Gov Portals (CT **7804** @ `IP_GOV_PORTALS_DEV`, r630-04): git pull + build + sync | `./scripts/deployment/sync-gov-portals-ct-7804-from-git.sh` — requires `GITEA_TOKEN` in `.env` (or `export GITEA_TOKEN=…`); optional `--skip-fetch`. See `docs/04-configuration/GOV_PORTALS_XOM_DEV_DEPLOYMENT.md`. 
| | CCIP relay (r630-01 host) | Unit: `config/systemd/ccip-relay.service` → `/etc/systemd/system/ccip-relay.service`; `systemctl enable --now ccip-relay` | | TsunamiSwap VM 5010 check | `./scripts/deployment/tsunamiswap-vm-5010-provision.sh` (inventory only until VM exists) | +| Solana native SOL (robust JSON-RPC submit) | `scripts/lib/solana_jsonrpc.py` (stdlib `sendTransaction`), `./scripts/deployment/solana-transfer-native.py` (sign with `solders`). Install: `pip install -r scripts/lib/requirements-solana-ops.txt`. Avoids solana-py `SendTransactionResp` parse failures on RPCs that return only a signature string. Env: `SOLANA_RPC_URL`, `SOLANA_KEYPAIR_PATH` via `source scripts/lib/load-project-env.sh`. | | The Order portal (`https://the-order.sankofa.nexus`) | OSJ management UI (secure auth); source repo **the_order** at `~/projects/the_order`. NPM upstream defaults to **order-haproxy** CT **10210** (`IP_ORDER_HAPROXY:80`); use `THE_ORDER_UPSTREAM_*` to point at the Sankofa portal if 10210 is down. Provision HAProxy: `scripts/deployment/provision-order-haproxy-10210.sh`. **`www.the-order.sankofa.nexus`** → **301** apex (same as www.sankofa / www.phoenix). | | Portal login + Keycloak systemd + `.env` (prints password once) | `./scripts/deployment/enable-sankofa-portal-login-7801.sh` (`--dry-run` first) | | Completable (no LAN) | `./scripts/run-completable-tasks-from-anywhere.sh` | +| **EI matrix → mainnet cWUSDC transfer** | `./scripts/deployment/send-cwusdc-ei-matrix-wallets.sh` — `transfer` from signer to grid slice (`--send-raw` / `--total-send-raw`); resume: `continue-cwusdc-ei-matrix-wallets.sh`. | +| **EI matrix top-up TSV from audit** | `scripts/verify/build-ei-matrix-cwusdc-topup-tsv-from-audit-json.sh` — rebuilds `ei-matrix-cwusdc-topup-*.tsv` from `ei-matrix-readiness-audit-latest.json`. | +| **EI matrix → Multicall3 cWUSDC (preferred)** | `./scripts/deployment/send-cwusdc-ei-matrix-multicall-batches.sh` — Multicall3 `aggregate3` + `transferFrom`; `EI_MATRIX_MC_CHUNK` (default 200). Core: `scripts/lib/ei_matrix_multicall3_cwusdc_batch.py`. Fallback: `send-cwusdc-ei-matrix-targeted.sh` (1 tx/wallet). Pipeline: `pipeline-ei-matrix-remediate-cwusdc-from-audit.sh --multicall`. | +| **EI matrix → mainnet cWUSDC mint** | `./scripts/deployment/pipeline-ei-matrix-mint-cwusdc.sh` — mints `CWUSDC_MAINNET` to each wallet in `config/pmm-soak-wallet-grid.json` (see `docs/03-deployment/EI_MATRIX_CWUSDC_MINT_PIPELINE.md`). Core: `mint-cwusdc-ei-matrix-wallets.sh`; resume: `continue-mint-cwusdc-ei-matrix-wallets.sh`. Not a 138 bridge. | +| **EI matrix on-chain readiness (cWUSDC / 138 cUSDC)** | `./scripts/verify/run-ei-matrix-full-readiness-audit.sh` — full grid, sharded (`--shard-size`, env `EI_MATRIX_AUDIT_*`), writes JSON + gap index files. Ad hoc: `./scripts/verify/audit-ei-matrix-onchain-readiness.sh`. Optional CI: `EI_MATRIX_ONCHAIN_AUDIT_CI=1` in `scripts/verify/run-all-validation.sh`. Core: `scripts/lib/ei_matrix_onchain_readiness_audit.py`. 
| | smom-dbis-138 root `forge test` | Uses `foundry.toml` `[profile.default] skip` for legacy Uniswap V2 vendor trees (0.5/0.6 solc); scoped work still uses `bash scripts/forge/scope.sh …` | | cWUSDT Mainnet USD pricing (on-chain + runbook) | `./scripts/deployment/price-cw-token-mainnet.sh` — `docs/03-deployment/CW_TOKEN_USD_PRICING_RUNBOOK.md` | | Deployer LP balances (mesh inventory) | `python3 scripts/deployment/check-deployer-lp-balances.py` — scans `deployment-status.json` + `reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json`; **UniV2** `lpToken` = pair; **DODO DVM** LP shares = `balanceOf(pool)`; on failure, probes `_BASE_TOKEN_` / `_BASE_CAPITAL_TOKEN_` / `_QUOTE_CAPITAL_TOKEN_` + extra public RPCs (`--no-resolve-dodo` skips; `--chain-id N` for one chain). JSON: `lpTokenAddress`, `lpResolution`, `lpBalances[]`. Use `--deployer` / `DEPLOYER_ADDRESS` if no `PRIVATE_KEY` | | Etherscan Value $0 for Mainnet `cW*` | Listing path (CoinGecko/CMC), not a contract toggle — `docs/04-configuration/coingecko/ETHERSCAN_USD_VALUE_MAINNET_TOKENS.md` | | Verify contracts on explorers (all networks) | `cd smom-dbis-138 && ./scripts/deployment/verify-all-networks-explorers.sh` — Blockscout 138, Etherscan + multichain `cW*`, Avax/Arb bridges, optional Cronos/Wemix/CCIPLogger | | Operator (LAN + secrets) | `./scripts/run-all-operator-tasks-from-lan.sh` (use `--skip-backup` if `NPM_PASSWORD` unset; backup also needs `NPM_EMAIL` in `.env`) | -| Remote SSH to dev VM (5700 / `192.168.11.59`) for runner & deploy API | [docs/04-configuration/DEV_VM_SSH_REMOTE_ACCESS.md](docs/04-configuration/DEV_VM_SSH_REMOTE_ACCESS.md) (Cloudflare Access + tunnel, or UDM allowlist) | +| Remote SSH to dev VM (5700 / `192.168.11.59`) for runner & deploy API | [DEV_VM_SSH_REMOTE_ACCESS.md](docs/04-configuration/DEV_VM_SSH_REMOTE_ACCESS.md); **move workstation `~/projects` → Dev VM:** [DEV_VM_WORKSTATION_MIGRATION_RUNBOOK.md](docs/04-configuration/DEV_VM_WORKSTATION_MIGRATION_RUNBOOK.md), `scripts/deployment/sync-local-projects-to-dev-vm.sh` (rsync code 23 on `--delete-remote`: `scripts/deployment/fix-dev-vm-srv-projects-ownership.sh`) | | Cloudflare bulk DNS → `PUBLIC_IP` | `./scripts/update-all-dns-to-public-ip.sh` — use **`--dry-run`** and **`--zone-only=sankofa.nexus`** (or `d-bis.org` / `mim4u.org` / `defi-oracle.io`) to limit scope; see script header. Prefer scoped **`CLOUDFLARE_API_TOKEN`** (see `.env.master.example`). | ## Git submodules diff --git a/config/ip-addresses.conf b/config/ip-addresses.conf index a985d814..f097cbfc 100644 --- a/config/ip-addresses.conf +++ b/config/ip-addresses.conf @@ -91,6 +91,10 @@ ORDER_POSTGRES_PRIMARY="192.168.11.44" ORDER_POSTGRES_REPLICA="192.168.11.45" # Dedicated order-redis LXC (e.g. 
VMID 10020) not present on cluster as of 2026-03; reserve for scripts / future CT ORDER_REDIS_IP="192.168.11.38" +# Order portal CTs (VMID 10090–10092) — reconciled with live inventory 2026-05-11 (r630-04) +ORDER_PORTAL_PUBLIC_IP="${ORDER_PORTAL_PUBLIC_IP:-192.168.11.180}" +ORDER_PORTAL_INTERNAL_IP="${ORDER_PORTAL_INTERNAL_IP:-192.168.11.181}" +ORDER_MCP_LEGAL_IP="${ORDER_MCP_LEGAL_IP:-192.168.11.182}" # DBIS Service IPs DBIS_POSTGRES_PRIMARY="192.168.11.105" diff --git a/config/proxmox-operational-template.json b/config/proxmox-operational-template.json index d2c1561b..f5047e02 100644 --- a/config/proxmox-operational-template.json +++ b/config/proxmox-operational-template.json @@ -1,7 +1,7 @@ { "schemaVersion": "1.0.0", - "updated": "2026-03-23", - "description": "Operational template: Proxmox VE nodes, LAN/WAN, NPMplus ingress, workloads (VMID/IP/hostname/FQDN), Besu peering summary, and deployment prerequisites. Authoritative detail remains in docs/04-configuration/ALL_VMIDS_ENDPOINTS.md and config/ip-addresses.conf \u2014 update those first, then sync this file. Live inventory reconciled 2026-03-23 vs cluster SSH audit; order-legal (10070) ARP fix 2026-03-25 (IP_ORDER_LEGAL).", + "updated": "2026-05-09", + "description": "Operational template: Proxmox VE nodes, LAN/WAN, NPMplus ingress, workloads (VMID/IP/hostname/FQDN), Besu peering summary, and deployment prerequisites. Authoritative detail remains in docs/04-configuration/ALL_VMIDS_ENDPOINTS.md and config/ip-addresses.conf — update those first, then sync this file. **Cluster inventory 2026-05-09:** PVE 9.1.7; 136 running LXC/QEMU; ml110 0 guests; r630-01 57, r630-02 41, r630-03 19, r630-04 19 (pvesh /cluster/resources).", "canonicalSources": [ "config/ip-addresses.conf", "docs/04-configuration/ALL_VMIDS_ENDPOINTS.md", @@ -108,7 +108,8 @@ ], "primary_ingress_ip": "192.168.11.167", "public_ipv4": "76.53.10.36", - "purpose": "Main d-bis.org, explorer, Option B RPC hostnames, MIM4U, primary ingress" + "purpose": "Main d-bis.org, explorer, Option B RPC hostnames, MIM4U, primary ingress", + "mission_critical_notes": "preferred_node r630-01; resize to >=2 vCPU / 2048 MiB for production headroom; pair with 10234 via VIP per NPMPLUS_MISSION_CRITICAL_DISTRIBUTION_AND_HA_PLAN.md" }, { "vmid": 10234, @@ -117,7 +118,8 @@ ], "public_ipv4": "76.53.10.37", "purpose": "Secondary / HA NPMplus (verify running; doc may show stopped)", - "status_note": "Confirm on cluster before relying on HA" + "status_note": "Confirm on cluster before relying on HA", + "mission_critical_notes": "preferred_node r630-02; verify memory/swap in UI matches primary; implement Keepalived + shared state per NPMPLUS_HA_SETUP_GUIDE.md" }, { "vmid": 10235, @@ -126,7 +128,8 @@ ], "public_ipv4": "76.53.10.38", "designated_public_ip_alt": "76.53.10.42", - "purpose": "rpc-core-2, Alltra, HYBX \u2014 see NPMPLUS_ALLTRA_HYBX_MASTER_PLAN.md" + "purpose": "rpc-core-2, Alltra, HYBX \u2014 see NPMPLUS_ALLTRA_HYBX_MASTER_PLAN.md", + "mission_critical_notes": "relocate off r630-01 when possible (e.g. r630-03) to isolate blast radius \u2014 see NPMPLUS_MISSION_CRITICAL_DISTRIBUTION_AND_HA_PLAN.md" }, { "vmid": 10236, @@ -134,7 +137,8 @@ "192.168.11.170" ], "public_ipv4": "76.53.10.40", - "purpose": "Dev/Codespaces tunnel, Gitea, Proxmox admin UI" + "purpose": "Dev/Codespaces tunnel, Gitea, Proxmox admin UI", + "mission_critical_notes": "relocate off r630-01 when possible (e.g. 
r630-04); restrict admin :81 \u2014 see NPMPLUS_MISSION_CRITICAL_DISTRIBUTION_AND_HA_PLAN.md" }, { "vmid": 10237, @@ -811,11 +815,11 @@ { "vmid": 6000, "hostname": "fabric-1", - "ipv4": "192.168.11.65", + "ipv4": "192.168.11.113", "preferred_node": "r630-02", "category": "dlt", - "runtime_state": "reserved_placeholder_stopped", - "notes": "As of 2026-03-28 this CT has been reclassified as a reserved placeholder and stopped. Earlier app-native checks found no active Fabric peer/orderer/couchdb processes, no expected listeners, and no meaningful Fabric payload under /opt, /etc, or /var.", + "runtime_state": "active_fabric_network", + "notes": "Live inventory 2026-05-11: running @ 192.168.11.113 on r630-02 (was placeholder when at .65).", "ports": [ { "port": 7051 @@ -1317,7 +1321,7 @@ "vmid": 3000, "hostname": "ml-node-1", "ipv4": "192.168.11.60", - "preferred_node": "ml110", + "preferred_node": "r630-01", "category": "ml", "ports": [], "fqdns": [] @@ -1326,7 +1330,7 @@ "vmid": 3001, "hostname": "ml-node-2", "ipv4": "192.168.11.61", - "preferred_node": "ml110", + "preferred_node": "r630-01", "category": "ml", "ports": [], "fqdns": [] @@ -1335,7 +1339,7 @@ "vmid": 3002, "hostname": "ml-node-3", "ipv4": "192.168.11.62", - "preferred_node": "ml110", + "preferred_node": "r630-01", "category": "ml", "ports": [], "fqdns": [] @@ -1343,8 +1347,8 @@ { "vmid": 3003, "hostname": "ml-node-4", - "ipv4": "192.168.11.63", - "preferred_node": "ml110", + "ipv4": "192.168.11.66", + "preferred_node": "r630-01", "category": "ml", "ports": [], "fqdns": [] @@ -1813,8 +1817,8 @@ { "vmid": 10090, "hostname": "order-portal-public", - "ipv4": "192.168.11.36", - "preferred_node": "r630-01", + "ipv4": "192.168.11.180", + "preferred_node": "r630-04", "category": "order", "ports": [ { @@ -1827,8 +1831,8 @@ { "vmid": 10091, "hostname": "order-portal-internal", - "ipv4": "192.168.11.35", - "preferred_node": "r630-01", + "ipv4": "192.168.11.181", + "preferred_node": "r630-04", "category": "order", "ports": [], "fqdns": [] @@ -1836,8 +1840,8 @@ { "vmid": 10092, "hostname": "order-mcp-legal", - "ipv4": "192.168.11.37", - "preferred_node": "r630-01", + "ipv4": "192.168.11.182", + "preferred_node": "r630-04", "category": "order", "ports": [], "fqdns": [] diff --git a/docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md b/docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md index 4038c108..ee93cbaf 100644 --- a/docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md +++ b/docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md @@ -124,9 +124,9 @@ Machine-derived rows below come from `services[]` in `config/proxmox-operational | 10060 | order-dataroom | 192.168.11.42 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | | 10070 | order-legal | 192.168.11.87 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | | 10080 | order-eresidency | 192.168.11.43 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | -| 10090 | order-portal-public | 192.168.11.36 | shared / non-concurrent mapping — verify live owner | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | -| 10091 | order-portal-internal | 192.168.11.35 | shared / non-concurrent mapping — verify live owner | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | -| 10092 | order-mcp-legal | 192.168.11.37 | shared / non-concurrent mapping — verify live owner | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | 
+| 10090 | order-portal-public | 192.168.11.180 | VLAN11 CT — reconciled 2026-05-11 | The Order service | unspecified | TBD | TBD | r630-04 | N/A | application | +| 10091 | order-portal-internal | 192.168.11.181 | VLAN11 CT — reconciled 2026-05-11 | The Order service | unspecified | TBD | TBD | r630-04 | N/A | application | +| 10092 | order-mcp-legal | 192.168.11.182 | VLAN11 CT — reconciled 2026-05-11 | The Order service | unspecified | TBD | TBD | r630-04 | N/A | application | | 10100 | dbis-postgres-primary | 192.168.11.105 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | | 10101 | dbis-postgres-replica-1 | 192.168.11.106 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | | 10120 | dbis-redis | 192.168.11.125 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | @@ -137,7 +137,7 @@ Machine-derived rows below come from `services[]` in `config/proxmox-operational | 10201 | order-grafana | 192.168.11.47 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | | 10202 | order-opensearch | 192.168.11.48 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | | 10210 | order-haproxy | 192.168.11.39 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | -| 10230 | order-vault | 192.168.11.55 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10230 | order-vault | 192.168.11.55 | unique in template | The Order service | unspecified | TBD | TBD | r630-04 | N/A | application | | 10232 | ct10232 | 192.168.11.56 | unique in template | General CT | unspecified | TBD | TBD | r630-01 | N/A | standard internal | | 10233 | npmplus-primary | 192.168.11.167 | unique in template | NPMplus ingress | unspecified | TBD | TBD | r630-01 | N/A | edge ingress | | 10234 | npmplus-secondary | 192.168.11.168 | unique in template | NPMplus ingress | unspecified | TBD | TBD | r630-02 | N/A | edge ingress | diff --git a/docs/03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md b/docs/03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md index 033d1f07..205cfdf2 100644 --- a/docs/03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md +++ b/docs/03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md @@ -82,7 +82,7 @@ Use the full table in **ALL_VMIDS_ENDPOINTS** (“NPMplus Endpoint Configuration **the-order.sankofa.nexus:** NPMplus → order HAProxy **10210** @ **192.168.11.39:80** (proxies to Sankofa portal **192.168.11.51:3000**). See `scripts/deployment/provision-order-haproxy-10210.sh`. 
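A minimal LAN-side sketch of probing the the-order edge chain described above (NPMplus → order-haproxy 10210 → Sankofa portal :3000). The probe path `/` and the 5-second timeout are assumptions for illustration; the repo's actual provisioning lives in `scripts/deployment/provision-order-haproxy-10210.sh`.

```python
#!/usr/bin/env python3
"""Probe the the-order edge chain from the LAN (illustrative sketch only)."""
import urllib.request

# IPs taken from the tables above; the "/" probe path is an assumption.
HOPS = {
    "order-haproxy (VMID 10210)": "http://192.168.11.39:80/",
    "sankofa-portal (VMID 7801)": "http://192.168.11.51:3000/",
}

for name, url in HOPS.items():
    try:
        with urllib.request.urlopen(url, timeout=5) as resp:
            print(f"{name}: HTTP {resp.status}")
    except Exception as exc:  # report the failure and keep probing the next hop
        print(f"{name}: FAILED ({exc})")
```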
-### 5.1 Order stack (live VMIDs, r630-01 unless noted) +### 5.1 Order stack (live VMIDs; core APIs mostly **r630-01**; portals **r630-04**) | VMID | Hostname | IP | Role (short) | |------|----------|-----|----------------| @@ -92,9 +92,9 @@ Use the full table in **ALL_VMIDS_ENDPOINTS** (“NPMplus Endpoint Configuration | 10060 | order-dataroom | 192.168.11.42 | Dataroom | | 10070 | order-legal | **192.168.11.87** | Legal — **moved off .54 2026-03-25** (`IP_ORDER_LEGAL`); .54 is **only** VMID 7804 gov-portals | | 10080 | order-eresidency | 192.168.11.43 | eResidency | -| 10090 | order-portal-public | 192.168.11.36 | Public portal | -| 10091 | order-portal-internal | 192.168.11.35 | Internal portal | -| 10092 | order-mcp-legal | 192.168.11.37 | MCP legal | +| 10090 | order-portal-public | 192.168.11.180 | Public portal (**r630-04**) | +| 10091 | order-portal-internal | 192.168.11.181 | Internal portal (**r630-04**) | +| 10092 | order-mcp-legal | 192.168.11.182 | MCP legal (**r630-04**) | | 10200 | order-prometheus | 192.168.11.46 | Metrics | | 10201 | order-grafana | 192.168.11.47 | Dashboards | | 10202 | order-opensearch | 192.168.11.48 | Search | diff --git a/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md b/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md index 933bc0e6..ca55dc68 100644 --- a/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md +++ b/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md @@ -1,24 +1,24 @@ # Complete VMID and Endpoints Reference -**Last Updated:** 2026-05-09 -**Document Version:** 1.3 +**Last Updated:** 2026-05-11 +**Document Version:** 1.4 **Status:** Active Documentation — **Master (source of truth)** for VMID, IP, port, and domain mapping. Use this with the live Besu fleet map in [../06-besu/BESU_NODE_CONFIGURATION_MAP_20260424.md](../06-besu/BESU_NODE_CONFIGURATION_MAP_20260424.md) and the cluster audit in [`../../scripts/verify/check-cluster-besu-inventory.sh`](../../scripts/verify/check-cluster-besu-inventory.sh). **Operational template (hosts, peering, deployment gates, JSON):** [../03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md](../03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md) · [`config/proxmox-operational-template.json`](../../config/proxmox-operational-template.json) --- -**Date**: 2026-05-09 +**Date**: 2026-05-11 **Status**: Current Active Configuration (Reconciled) -**Last Updated**: 2026-05-09 -**Verification Status**: ✅ Cluster-wide guest inventory — **136** running LXC/QEMU (**2026-05-09** `pvesh get /cluster/resources`); **ml110** **0** guests; primary counts on **r630-01** (57), **r630-02** (41), **r630-03** (19), **r630-04** (19). Besu fleet detail: host audit + [`../../scripts/verify/check-cluster-besu-inventory.sh`](../../scripts/verify/check-cluster-besu-inventory.sh). +**Last Updated**: 2026-05-11 +**Verification Status**: ✅ Cluster-wide guest inventory — **137** running LXC/QEMU (regenerate: `bash scripts/it-ops/export-live-inventory-and-drift.sh` → **`reports/status/live_inventory.json`**; same collector runs on **r630-01** as **`/opt/proxmox/...`**). Parses **`ipconfig*`** (QEMU) and **`net*`** (LXC). **ml110** **0** guests; primary counts on **r630-01** (57), **r630-02** (41), **r630-03** (20), **r630-04** (19). Besu fleet detail: host audit + [`../../scripts/verify/check-cluster-besu-inventory.sh`](../../scripts/verify/check-cluster-besu-inventory.sh). --- ## Quick Summary -- **Cluster (all nodes, LXC+QEMU) — running:** **136** (**2026-05-09** `pvesh get /cluster/resources`); **all** were `running` in that pass. 
-- **Per Proxmox node (guests):** **r630-01** 57, **r630-02** 41, **r630-03** 19, **r630-04** 19, **ml110** 0. +- **Cluster (all nodes, LXC+QEMU) — running:** **137** (**2026-05-11** live inventory export); **all** were `running` in that pass. +- **Per Proxmox node (guests):** **r630-01** 57, **r630-02** 41, **r630-03** 20, **r630-04** 19, **ml110** 0. - **Documented VMID rows** in this file: 50+ service entries (excl. deprecated); category rolls below are **Besu / app taxonomy** — reconcile exact Besu counts with `check-cluster-besu-inventory.sh` and the Besu map doc. - **Infrastructure Services** (sample category): 10 - **Blockchain Nodes**: 37 canonical Besu nodes (Validators: 5, Sentries: 11, RPC: 21) — verify against live map @@ -35,23 +35,32 @@ ## Infrastructure Services -### Proxmox Infrastructure (r630-02) +### Proxmox Infrastructure (mostly **r630-01**; Omada not present) | VMID | IP Address | Hostname | Status | Endpoints | Purpose | |------|------------|----------|--------|-----------|---------| | 100 | 192.168.11.32 | proxmox-mail-gateway | ✅ Running | SMTP: 25, 587, 465 | Email gateway | | 101 | 192.168.11.33 | proxmox-datacenter-manager | ✅ Running | Web: 8006 | Datacenter management | -| 103 | 192.168.11.30 | omada | ✅ Running | Web: 8043 | Omada controller | +| 102 | 192.168.11.34 | cloudflared | ✅ Running | Tunnel | Cloudflare Tunnel (`cloudflared`) | | 104 | 192.168.11.31 | gitea | ✅ Running | Web: 80, 443 | Git repository | | 105 | 192.168.11.26 | nginxproxymanager | ✅ Running | Web: 80, 81, 443 | Nginx Proxy Manager (legacy) | | 130 | 192.168.11.27 | monitoring-1 | ✅ Running | Web: 80, 443 | Monitoring services | +**Not in live cluster inventory (2026-05-11):** **103** (Omada). Prior doc row `192.168.11.30` **omada** is **retired** unless reprovisioned. + ### NPMplus (r630-01 / r630-02) | VMID | IP Address | Hostname | Status | Endpoints | Purpose | |------|------------|----------|--------|-----------|---------| -| 10233 | 192.168.11.167 | npmplus | ✅ Running | Web: 80, 81, 443 | NPMplus reverse proxy | -| 10234 | 192.168.11.168 | npmplus-secondary | ✅ Running | Web: 80, 81, 443 | NPMplus secondary (HA); restarted 2026-02-03 | +| 10233 | 192.168.11.167 | npmplus | ✅ Running | Web: 80, 81, 443 | NPMplus reverse proxy (primary ingress) | +| 10234 | 192.168.11.168 | npmplus-secondary | ✅ Running | Web: 80, 81, 443 | NPMplus secondary (HA standby); **r630-02** | +| 10235 | 192.168.11.169 | npmplus-alltra-hybx | ✅ Running | Web: 80, 81, 443 | Third NPM — Alltra/HYBX / rpc-core-2 style paths | +| 10236 | 192.168.11.170 | npmplus-fourth | ✅ Running | Web: 80, 81, 443 | Fourth NPM — dev / Codespaces / Gitea tunnel | +| 10237 | 192.168.11.171 | npmplus-mifos | ✅ Running | Web: 80, 81, 443 | NPMplus (Mifos / Fineract path); **r630-02** | + +**Live placement (reconcile with cluster):** Run `bash scripts/maintenance/npmplus-cluster-placement-status.sh`. As of **2026-05**, **10233**, **10235**, **10236** often run on **r630-01**; **10234** on **r630-02**. **Target:** redistribute for blast-radius — see [NPMPLUS_MISSION_CRITICAL_DISTRIBUTION_AND_HA_PLAN.md](NPMPLUS_MISSION_CRITICAL_DISTRIBUTION_AND_HA_PLAN.md). + +**Dual-homed (10233):** `net0` **192.168.11.166/24**; `net1` **192.168.11.167/24** (default route / gateway on **.167**). Use **.167** for NPMplus API, public ingress, and runbook examples. `live_inventory.json` may list only the first `net*` address (**.166**). **Note**: NPMplus primary is on VLAN 11 (192.168.11.167). 
Secondary NPMplus instance on r630-02 for HA configuration. @@ -61,11 +70,15 @@ ## RPC Translator Supporting Services +**Status (2026-05-11):** VMIDs **106–108** are **not** present in cluster `live_inventory.json`. Prior translator LXCs are **retired** unless reprovisioned. + | VMID | IP Address | Hostname | Status | Endpoints | Purpose | |------|------------|----------|--------|-----------|---------| -| 106 | 192.168.11.110 | redis-rpc-translator | ✅ Running | Redis: 6379 | Distributed nonce management | -| 107 | 192.168.11.111 | web3signer-rpc-translator | ✅ Running | Web3Signer: 9000 | Transaction signing | -| 108 | 192.168.11.112 | vault-rpc-translator | ✅ Running | Vault: 8200 | Secrets management | +| — | — | (historical) redis-rpc-translator | Retired | Redis: 6379 | Was ~106 @ `.110` | +| — | — | (historical) web3signer-rpc-translator | Retired | Web3Signer: 9000 | Was ~107 @ `.111` | +| — | — | (historical) vault-rpc-translator | Retired | Vault: 8200 | Was ~108 @ `.112` | + +**Live reassignment:** `.111` / `.112` are used by **8811** (`sankofa-proxmox-mcp`) and **8812** (`operator-services`) on **r630-04** — see [Supplementary cluster inventory](#supplementary-cluster-inventory-live-2026-05-11). --- @@ -119,6 +132,7 @@ All RPC nodes have been migrated to a new VMID structure for better organization | VMID | IP Address | Hostname | Status | Block | Peers | Endpoints | Purpose | |------|------------|----------|--------|-------|-------|-----------|---------| | 2101 | 192.168.11.211 | besu-rpc-core-1 | ✅ Running | 1,145,367 | 7 | Besu: 8545/8546, P2P: 30303, Metrics: 9545 | Core RPC node | +| 2102 | 192.168.11.212 | besu-rpc-core-2 | ✅ Running | (live) | (live) | Besu: 8545/8546, P2P: 30303, Metrics: 9545 | Core RPC node 2 (**r630-03**) | | 2103 | 192.168.11.217 | besu-rpc-core-thirdweb | ✅ Running | Live SSH verified 2026-04-24 | Live SSH verified 2026-04-24 | Besu: 8545/8546, P2P: 30303, Metrics: 9545 | Core Thirdweb admin RPC node | | **2201** | **192.168.11.221** | besu-rpc-public-1 | ✅ Running | 1,145,367 | 7 | Besu: 8545/8546, P2P: 30303, Metrics: 9545 | Public RPC node **(FIXED PERMANENT)** | | 2301 | 192.168.11.232 | besu-rpc-private-1 | ✅ Running | Cluster CT confirmed on `r630-03` | - | Besu: 8545/8546, P2P: 30303, Metrics: 9545 | Fireblocks-dedicated RPC on `r630-03` | @@ -192,16 +206,12 @@ These were found live on `r630-01` during the same SSH pass, but they do not exi **Status**: Historical migration reference only. The rows below refer to the old `.250-.255/.201-.204` plan, not the live `.172-.174/.246-.248` ALLTRA/HYBX RPCs found during the 2026-04-24 SSH pass. -The following VMIDs have been permanently removed: +**Historic VMIDs 2500–2505 (Besu RPC, destroyed):** former assignments — **2500** @ `.250`, **2501** @ `.251`, **2502** @ `.252`, **2503** @ `.253`, **2504** @ `.254`, **2505** @ `.201` — superseded by VMIDs **2101**, **2201**, **2301**, **2303**, **2304**, **2305** respectively. Those numeric VMIDs were **later reused** for ALLTRA/HYBX internal RPC (same VMID number, different workload). Current IPs are in **Additional Live Internal ALLTRA / HYBX RPC Nodes**. 
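Because those VMID numbers were reused for different workloads, automation should resolve roles from the live export rather than from historic VMID assumptions. A minimal sketch, assuming only the `guests[]` schema of `reports/status/live_inventory.json` added in this patch (the helper name `current_assignment` is illustrative, not a repo function):

```python
#!/usr/bin/env python3
"""Resolve what a VMID currently hosts from the exported live inventory (sketch)."""
import json
import sys
from pathlib import Path

INVENTORY = Path("reports/status/live_inventory.json")  # written by export-live-inventory-and-drift.sh


def current_assignment(vmid):
    """Return the live guest record for a VMID, or None if it is not on the cluster."""
    guests = json.loads(INVENTORY.read_text())["guests"]
    return next((g for g in guests if g["vmid"] == str(vmid)), None)


if __name__ == "__main__":
    for vmid in sys.argv[1:] or ["2500", "2505"]:
        guest = current_assignment(vmid)
        if guest is None:
            print(f"VMID {vmid}: not on live cluster")
        else:
            print(f"VMID {vmid}: {guest['name']} @ {guest.get('ip') or '(no LAN IP)'} on {guest['node']}")
```

For example, VMID 2500 resolves to `besu-rpc-alltra-1 @ 192.168.11.172 on r630-01` in the 2026-05-11 export, not to the destroyed `besu-rpc-1`.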
+ +The following VMIDs have been permanently removed (**no reuse on live cluster**): | VMID | Old IP Address | Old Hostname | Status | Replaced By | |------|----------------|--------------|--------|-------------| -| 2500 | 192.168.11.250 | besu-rpc-1 | 🗑️ Destroyed | VMID 2101 | -| 2501 | 192.168.11.251 | besu-rpc-2 | 🗑️ Destroyed | VMID 2201 | -| 2502 | 192.168.11.252 | besu-rpc-3 | 🗑️ Destroyed | VMID 2301 | -| 2503 | 192.168.11.253 | besu-rpc-ali-0x8a | 🗑️ Destroyed | VMID 2303 | -| 2504 | 192.168.11.254 | besu-rpc-ali-0x1 | 🗑️ Destroyed | VMID 2304 | -| 2505 | 192.168.11.201 | besu-rpc-luis-0x8a | 🗑️ Destroyed | VMID 2305 | | 2506 | 192.168.11.202 | besu-rpc-luis-0x1 | 🗑️ Destroyed | VMID 2306 | | 2507 | 192.168.11.203 | besu-rpc-putu-0x8a | 🗑️ Destroyed | VMID 2307 | | 2508 | 192.168.11.204 | besu-rpc-putu-0x1 | 🗑️ Destroyed | VMID 2308 | @@ -227,6 +237,8 @@ The following VMIDs have been permanently removed: **CI/CD:** Gitea `.gitea/workflows/deploy.yml` — secrets `TREASURY_DEPLOY_HOST`, `TREASURY_DEPLOY_USER`, `TREASURY_DEPLOY_SSH_KEY`, `TREASURY_DEPLOY_PATH`; runner must reach **192.168.11.94** on LAN. +**Public edge (2026-05):** **`dealflow.d-bis.org`** → Cloudflare **A** on **`d-bis.org`** (script: **`scripts/update-all-dns-to-public-ip.sh`**) → UDM/NPMplus **76.53.10.36:443** → NPMplus **`https://192.168.11.94:443`** (`forward_scheme` **https**). Backend **`CORS_ORIGIN`** must list **`https://dealflow.d-bis.org`** for **`POST /api/auth/demo-login`** from the browser. TLS: NPM Let’s Encrypt ( **`request-npmplus-certificates.sh`** with `CERT_DOMAINS_FILTER` if needed). + --- ### Blockchain Explorer @@ -269,7 +281,7 @@ The following VMIDs have been permanently removed: | VMID | IP Address | Hostname | Status | Endpoints | Purpose | |------|------------|----------|--------|-----------|---------| -| 6000 | 192.168.11.65 | fabric-1 | ✅ Running | Peer: 7051, Orderer: 7050 | Hyperledger Fabric network | +| 6000 | 192.168.11.113 | fabric-1 | ✅ Running | Peer: 7051, Orderer: 7050 | Hyperledger Fabric network | --- @@ -355,7 +367,7 @@ The following VMIDs have been permanently removed: --- -### The Order — microservices (r630-01) +### The Order — microservices (mostly **r630-01**; portals **r630-04**) | VMID | IP Address | Hostname | Status | Endpoints | Purpose | |------|------------|----------|--------|-----------|---------| @@ -365,17 +377,17 @@ The following VMIDs have been permanently removed: | 10060 | 192.168.11.42 | order-dataroom | ✅ Running | Web: 80 | Dataroom | | 10070 | **192.168.11.87** | order-legal | ✅ Running | API | Legal — **use `IP_ORDER_LEGAL` (.87); not .54** | | 10080 | 192.168.11.43 | order-eresidency | ✅ Running | API | eResidency | -| 10090 | 192.168.11.36 | order-portal-public | ✅ Running | Web | Public portal | -| 10091 | 192.168.11.35 | order-portal-internal | ✅ Running | Web | Internal portal | -| 10092 | 192.168.11.37 | order-mcp-legal | ✅ Running | API | MCP legal | +| 10090 | 192.168.11.180 | order-portal-public | ✅ Running | Web | Public portal | +| 10091 | 192.168.11.181 | order-portal-internal | ✅ Running | Web | Internal portal | +| 10092 | 192.168.11.182 | order-mcp-legal | ✅ Running | API | MCP legal | | 10200 | 192.168.11.46 | order-prometheus | ✅ Running | 9090 | Metrics (`IP_ORDER_PROMETHEUS`; not Order Redis) | | 10201 | 192.168.11.47 | order-grafana | ✅ Running | 3000 | Dashboards | | 10202 | 192.168.11.48 | order-opensearch | ✅ Running | 9200 | Search | | 10210 | 192.168.11.39 | order-haproxy | ✅ Running | 80 (HAProxy → portal :3000) | Edge for 
**the-order.sankofa.nexus**; HAProxy config via `config/haproxy/order-haproxy-10210.cfg.template` + `scripts/deployment/provision-order-haproxy-10210.sh` | -**Gov portals vs Order:** VMID **7804** alone uses **192.168.11.54** (`IP_GOV_PORTALS_DEV`). Order-legal must not use .54. +**Note:** **10090–10092** are on **r630-04** (not r630-01). **MIM4U** uses **7810/7811** on **.37/.36** (r630-02) — do not conflate with Order portal IPs. ---- +**Gov portals vs Order:** VMID **7804** alone uses **192.168.11.54** (`IP_GOV_PORTALS_DEV`). Order-legal must not use .54. ### Phoenix Vault Cluster (8640-8642) @@ -407,7 +419,10 @@ The following VMIDs have been permanently removed: | 5800 | 192.168.11.85 | (Mifos) | ✅ Running | Web: 80 | Mifos X + Fineract (OMNL) | LXC on r630-02; mifos.d-bis.org; see [MIFOS_R630_02_DEPLOYMENT.md](MIFOS_R630_02_DEPLOYMENT.md) | | 5801 | 192.168.11.58 | dapp-smom | — | Web: 80 | DApp (frontend-dapp) for Chain 138 bridge | LXC; see [DAPP_LXC_DEPLOYMENT.md](../03-deployment/DAPP_LXC_DEPLOYMENT.md); NPMplus/tunnel dapp.d-bis.org | | 10232 | 192.168.11.56 | CT10232 | ✅ Running | Various | Container service | ✅ **IP CONFLICT RESOLVED** | -| 10234 | 192.168.11.168 | npmplus-secondary | ⏸️ Stopped | Web: 80, 81, 443 | NPMplus secondary (HA) | On r630-02 | +| 10203 | 192.168.11.228 | omdnl-org-web | ✅ Running | Web: 80 | OMNL / org web (small CT) | **r630-01**; renumbered from **.222** (2026-05-11) to resolve duplicate with **2104** | +| 2421 | 192.168.11.229 | mev-control-backend | ✅ Running | API / backend | MEV control platform backend | **r630-04**; renumbered from **.223** (2026-05-11) to resolve duplicate with **2202** | + +**Note:** **10234** is listed under **NPMplus** above (not stopped); older duplicate rows removed. **10203** / **2421** had briefly shared **.222** / **.223** with canonical Besu Justin RPC CTs — fixed by reassignment to **.228** / **.229**. --- @@ -423,12 +438,55 @@ The following VMIDs have been permanently removed: ### Machine Learning Nodes +**Placement:** LXCs **3000–3003** run on **r630-01** (hostname field remains `ml110` from template). + | VMID | IP Address | Hostname | Status | Endpoints | Purpose | |------|------------|----------|--------|-----------|---------| | 3000 | 192.168.11.60 | ml110 | ✅ Running | ML Services: Various | ML node 1 | | 3001 | 192.168.11.61 | ml110 | ✅ Running | ML Services: Various | ML node 2 | | 3002 | 192.168.11.62 | ml110 | ✅ Running | ML Services: Various | ML node 3 | -| 3003 | 192.168.11.63 | ml110 | ✅ Running | ML Services: Various | ML node 4 | +| 3003 | 192.168.11.66 | ml110 | ✅ Running | ML Services: Various | ML node 4 (**r630-01**) | + + +--- + +## Supplementary cluster inventory (live 2026-05-11) + +Guests present in `r630-01:/opt/proxmox/reports/status/live_inventory.json` at collection time but **not** listed in category tables above (for automation cross-checks). Canonical Besu / NPMplus rows are omitted here when already duplicated above. 
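A minimal sketch of the duplicate-IP cross-check this table supports, assuming only the `live_inventory.json` schema added in this patch; the authoritative implementation is `scripts/it-ops/compute_ipam_drift.py`, and the exit-code convention mirrors the exporter (non-zero only on duplicate guest LAN IPs):

```python
#!/usr/bin/env python3
"""Flag duplicate LAN IPs among live guests (sketch; see compute_ipam_drift.py)."""
import json
import sys
from collections import defaultdict
from pathlib import Path

inventory = json.loads(Path("reports/status/live_inventory.json").read_text())

by_ip = defaultdict(list)
for guest in inventory["guests"]:
    ip = guest.get("ip", "")
    if ip.startswith("192.168.11."):  # only the VLAN 11 LAN matters for this check
        by_ip[ip].append(f"{guest['vmid']}/{guest['name']}")

duplicates = {ip: owners for ip, owners in by_ip.items() if len(owners) > 1}
for ip, owners in sorted(duplicates.items()):
    print(f"DUPLICATE {ip}: {', '.join(owners)}")

sys.exit(2 if duplicates else 0)
```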
+ +| VMID | IP Address | Hostname | Node | Notes | +|------|------------|----------|------|-------| +| 2410 | 192.168.11.218 | info-defi-oracle-web | r630-01 | | +| 5201 | 192.168.11.177 | cacti-alltra-1 | r630-02 | | +| 5202 | 192.168.11.251 | cacti-hybx-1 | r630-02 | | +| 5700 | 192.168.11.59 | dev-vm | r630-04 | | +| 5701 | 192.168.11.65 | gitea-runner-1 | r630-04 | | +| 5702 | 192.168.11.82 | ai-inf-1 | r630-01 | | +| 5705 | 192.168.11.86 | ai-inf-2 | r630-01 | | +| 5751 | 192.168.11.69 | op-stack-deployer-1 | r630-02 | | +| 5752 | 192.168.11.70 | op-stack-ops-1 | r630-02 | | +| 6001 | 192.168.11.178 | fabric-alltra-1 | r630-02 | | +| 6002 | 192.168.11.252 | fabric-hybx-1 | r630-02 | | +| 6202 | 192.168.11.175 | firefly-alltra-1 | r630-02 | | +| 6203 | 192.168.11.176 | firefly-alltra-2 | r630-02 | | +| 6204 | 192.168.11.249 | firefly-hybx-1 | r630-02 | | +| 6205 | 192.168.11.250 | firefly-hybx-2 | r630-02 | | +| 6401 | 192.168.11.179 | indy-alltra-1 | r630-02 | | +| 6402 | 192.168.11.253 | indy-hybx-1 | r630-02 | | +| 6500 | 192.168.11.88 | aries-1 | r630-02 | | +| 6600 | 192.168.11.93 | caliper-1 | r630-02 | | +| 7806 | 192.168.11.63 | sankofa-public-web | r630-01 | | +| 7807 | — | cc-phase1-lab | r630-01 | No static `192.168.11.x` in `net*` (verify inside CT) | +| 7808 | — | cc-phase1-k3s | r630-01 | No static `192.168.11.x` in `net*` (verify inside CT) | +| 7815 | 192.168.11.75 | cc-phase1-lab | r630-02 | Second cc-phase1 lab CT | +| 8604 | 10.160.0.14 | currencicombo-phoenix-1 | r630-01 | Internal overlay; see Phoenix Extensions above | +| 8811 | 192.168.11.111 | sankofa-proxmox-mcp | r630-04 | | +| 8812 | 192.168.11.112 | operator-services | r630-04 | | +| 10000 | 192.168.11.44 | order-postgres-primary | r630-01 | Also referenced as `ORDER_POSTGRES_PRIMARY` | +| 10001 | 192.168.11.45 | order-postgres-replica | r630-01 | | +| 10020 | 192.168.11.38 | order-redis | r630-04 | | +| 10230 | 192.168.11.55 | order-vault | r630-04 | | +| 10900 | 192.168.11.115 | mailcow-dbis | r630-01 | | --- @@ -460,7 +518,7 @@ Internet ↓ Cloudflare (DNS + DDoS Protection) ↓ -NPMplus (VMID 10233: 192.168.0.166:443) +NPMplus (VMID 10233: 192.168.11.167:443) ↓ VM Nginx (443) → Backend Services ``` diff --git a/reports/status/drift.json b/reports/status/drift.json new file mode 100644 index 00000000..b8e48e75 --- /dev/null +++ b/reports/status/drift.json @@ -0,0 +1,156 @@ +{ + "collected_at": "2026-05-11T17:21:42Z", + "guest_count": 137, + "duplicate_ips": {}, + "same_name_duplicate_ip_guests": {}, + "guest_ips_not_in_ip_addresses_conf": [ + "192.168.11.111", + "192.168.11.112", + "192.168.11.113", + "192.168.11.115", + "192.168.11.172", + "192.168.11.173", + "192.168.11.174", + "192.168.11.200", + "192.168.11.213", + "192.168.11.214", + "192.168.11.215", + "192.168.11.217", + "192.168.11.218", + "192.168.11.219", + "192.168.11.220", + "192.168.11.222", + "192.168.11.223", + "192.168.11.224", + "192.168.11.225", + "192.168.11.226", + "192.168.11.227", + "192.168.11.228", + "192.168.11.229", + "192.168.11.233", + "192.168.11.234", + "192.168.11.235", + "192.168.11.236", + "192.168.11.237", + "192.168.11.238", + "192.168.11.243", + "192.168.11.244", + "192.168.11.245", + "192.168.11.246", + "192.168.11.247", + "192.168.11.248", + "192.168.11.249", + "192.168.11.253", + "192.168.11.27", + "192.168.11.29", + "192.168.11.33", + "192.168.11.34", + "192.168.11.35", + "192.168.11.40", + "192.168.11.41", + "192.168.11.42", + "192.168.11.43", + "192.168.11.47", + "192.168.11.49", + "192.168.11.55", + "192.168.11.56", 
+ "192.168.11.57", + "192.168.11.60", + "192.168.11.61", + "192.168.11.62", + "192.168.11.63", + "192.168.11.69", + "192.168.11.70", + "192.168.11.75", + "192.168.11.80", + "192.168.11.82", + "192.168.11.86", + "192.168.11.88", + "192.168.11.89", + "192.168.11.91", + "192.168.11.92", + "192.168.11.93" + ], + "ip_addresses_conf_ips_not_on_guests": [ + "1.0.0.1", + "1.1.1.1", + "192.168.11.0", + "192.168.11.167", + "192.168.11.19", + "192.168.11.20", + "192.168.11.201", + "192.168.11.203", + "192.168.11.204", + "192.168.11.23", + "192.168.11.255", + "192.168.11.30", + "192.168.11.67", + "192.168.11.68", + "192.168.11.8", + "76.53.10.32", + "76.53.10.40", + "76.53.10.41", + "76.53.10.42" + ], + "guest_lan_ips_not_in_declared_sources": [], + "declared_lan11_ips_not_on_live_guests": [ + "192.168.11.0", + "192.168.11.167", + "192.168.11.19", + "192.168.11.20", + "192.168.11.201", + "192.168.11.203", + "192.168.11.204", + "192.168.11.23", + "192.168.11.255", + "192.168.11.30", + "192.168.11.67", + "192.168.11.68", + "192.168.11.8" + ], + "vmid_ip_mismatch_live_vs_all_vmids_doc": [ + { + "vmid": "10233", + "live_ip": "192.168.11.166", + "all_vmids_doc_ip": "192.168.11.167" + } + ], + "vmids_in_all_vmids_doc_not_on_cluster": [ + "2420", + "2430", + "2440", + "2460", + "2470", + "2480", + "2506", + "2507", + "2508" + ], + "vmids_on_cluster_not_in_all_vmids_table": { + "count": 3, + "sample_vmids": [ + "7807", + "7808", + "8604" + ], + "note": "ALL_VMIDS_ENDPOINTS pipe tables do not list every guest; large count is normal." + }, + "hypervisor_and_infra_ips_excluded_from_guest_match": [ + "192.168.11.1", + "192.168.11.10", + "192.168.11.11", + "192.168.11.12", + "192.168.11.2", + "192.168.11.24", + "192.168.11.25", + "192.168.11.26", + "76.53.10.33", + "76.53.10.34" + ], + "declared_sources": { + "ip_addresses_conf_ipv4_count": 97, + "all_vmids_md_lan11_count": 136, + "all_vmids_md_row_count": 143 + }, + "notes": [] +} \ No newline at end of file diff --git a/reports/status/live_inventory.json b/reports/status/live_inventory.json new file mode 100644 index 00000000..2d485dcb --- /dev/null +++ b/reports/status/live_inventory.json @@ -0,0 +1,1380 @@ +{ + "collected_at": "2026-05-11T17:21:42Z", + "source": "proxmox_cluster_pvesh_plus_config", + "guests": [ + { + "vmid": "100", + "type": "lxc", + "node": "r630-01", + "name": "proxmox-mail-gateway", + "status": "running", + "ip": "192.168.11.32", + "mac": "BC:24:11:3F:A2:B0", + "config_path": "/etc/pve/nodes/r630-01/lxc/100.conf" + }, + { + "vmid": "101", + "type": "lxc", + "node": "r630-01", + "name": "proxmox-datacenter-manager", + "status": "running", + "ip": "192.168.11.33", + "mac": "BC:24:11:AD:A7:28", + "config_path": "/etc/pve/nodes/r630-01/lxc/101.conf" + }, + { + "vmid": "102", + "type": "lxc", + "node": "r630-01", + "name": "cloudflared", + "status": "running", + "ip": "192.168.11.34", + "mac": "BC:24:11:2E:D9:AA", + "config_path": "/etc/pve/nodes/r630-01/lxc/102.conf" + }, + { + "vmid": "104", + "type": "lxc", + "node": "r630-01", + "name": "gitea", + "status": "running", + "ip": "192.168.11.31", + "mac": "BC:24:11:A8:F6:89", + "config_path": "/etc/pve/nodes/r630-01/lxc/104.conf" + }, + { + "vmid": "105", + "type": "lxc", + "node": "r630-01", + "name": "nginxproxymanager", + "status": "running", + "ip": "192.168.11.26", + "mac": "BC:24:11:71:6A:78", + "config_path": "/etc/pve/nodes/r630-01/lxc/105.conf" + }, + { + "vmid": "130", + "type": "lxc", + "node": "r630-01", + "name": "monitoring-1", + "status": "running", + "ip": "192.168.11.27", + 
"mac": "BC:24:11:E5:90:97", + "config_path": "/etc/pve/nodes/r630-01/lxc/130.conf" + }, + { + "vmid": "1000", + "type": "lxc", + "node": "r630-01", + "name": "besu-validator-1", + "status": "running", + "ip": "192.168.11.100", + "mac": "BC:24:11:08:A5:C7", + "config_path": "/etc/pve/nodes/r630-01/lxc/1000.conf" + }, + { + "vmid": "1001", + "type": "lxc", + "node": "r630-01", + "name": "besu-validator-2", + "status": "running", + "ip": "192.168.11.101", + "mac": "BC:24:11:83:66:6D", + "config_path": "/etc/pve/nodes/r630-01/lxc/1001.conf" + }, + { + "vmid": "1002", + "type": "lxc", + "node": "r630-01", + "name": "besu-validator-3", + "status": "running", + "ip": "192.168.11.102", + "mac": "BC:24:11:41:3D:5D", + "config_path": "/etc/pve/nodes/r630-01/lxc/1002.conf" + }, + { + "vmid": "1003", + "type": "lxc", + "node": "r630-03", + "name": "besu-validator-4", + "status": "running", + "ip": "192.168.11.103", + "mac": "BC:24:11:A6:7F:1B", + "config_path": "/etc/pve/nodes/r630-03/lxc/1003.conf" + }, + { + "vmid": "1004", + "type": "lxc", + "node": "r630-03", + "name": "besu-validator-5", + "status": "running", + "ip": "192.168.11.104", + "mac": "BC:24:11:67:CB:16", + "config_path": "/etc/pve/nodes/r630-03/lxc/1004.conf" + }, + { + "vmid": "1500", + "type": "lxc", + "node": "r630-01", + "name": "besu-sentry-1", + "status": "running", + "ip": "192.168.11.150", + "mac": "BC:24:11:44:4D:F1", + "config_path": "/etc/pve/nodes/r630-01/lxc/1500.conf" + }, + { + "vmid": "1501", + "type": "lxc", + "node": "r630-01", + "name": "besu-sentry-2", + "status": "running", + "ip": "192.168.11.151", + "mac": "BC:24:11:EC:1E:0B", + "config_path": "/etc/pve/nodes/r630-01/lxc/1501.conf" + }, + { + "vmid": "1502", + "type": "lxc", + "node": "r630-01", + "name": "besu-sentry-3", + "status": "running", + "ip": "192.168.11.152", + "mac": "BC:24:11:8C:8D:BB", + "config_path": "/etc/pve/nodes/r630-01/lxc/1502.conf" + }, + { + "vmid": "1503", + "type": "lxc", + "node": "r630-03", + "name": "besu-sentry-4", + "status": "running", + "ip": "192.168.11.153", + "mac": "BC:24:11:97:95:22", + "config_path": "/etc/pve/nodes/r630-03/lxc/1503.conf" + }, + { + "vmid": "1504", + "type": "lxc", + "node": "r630-03", + "name": "besu-sentry-ali", + "status": "running", + "ip": "192.168.11.154", + "mac": "BC:24:11:BE:0F:B3", + "config_path": "/etc/pve/nodes/r630-03/lxc/1504.conf" + }, + { + "vmid": "1505", + "type": "lxc", + "node": "r630-03", + "name": "besu-sentry-alltra-1", + "status": "running", + "ip": "192.168.11.213", + "mac": "BC:24:11:74:2B:55", + "config_path": "/etc/pve/nodes/r630-03/lxc/1505.conf" + }, + { + "vmid": "1506", + "type": "lxc", + "node": "r630-03", + "name": "besu-sentry-alltra-2", + "status": "running", + "ip": "192.168.11.214", + "mac": "BC:24:11:71:58:1D", + "config_path": "/etc/pve/nodes/r630-03/lxc/1506.conf" + }, + { + "vmid": "1507", + "type": "lxc", + "node": "r630-03", + "name": "besu-sentry-hybx-1", + "status": "running", + "ip": "192.168.11.244", + "mac": "BC:24:11:A9:5B:E9", + "config_path": "/etc/pve/nodes/r630-03/lxc/1507.conf" + }, + { + "vmid": "1508", + "type": "lxc", + "node": "r630-04", + "name": "besu-sentry-hybx-2", + "status": "running", + "ip": "192.168.11.245", + "mac": "BC:24:11:3C:5B:03", + "config_path": "/etc/pve/nodes/r630-04/lxc/1508.conf" + }, + { + "vmid": "1509", + "type": "lxc", + "node": "r630-03", + "name": "besu-sentry-thirdweb-01", + "status": "running", + "ip": "192.168.11.219", + "mac": "BC:24:11:19:50:09", + "config_path": "/etc/pve/nodes/r630-03/lxc/1509.conf" + }, + { + 
"vmid": "1510", + "type": "lxc", + "node": "r630-03", + "name": "besu-sentry-thirdweb-02", + "status": "running", + "ip": "192.168.11.220", + "mac": "BC:24:11:19:51:10", + "config_path": "/etc/pve/nodes/r630-03/lxc/1510.conf" + }, + { + "vmid": "2101", + "type": "lxc", + "node": "r630-01", + "name": "besu-rpc-core-1", + "status": "running", + "ip": "192.168.11.211", + "mac": "BC:24:11:16:E7:02", + "config_path": "/etc/pve/nodes/r630-01/lxc/2101.conf" + }, + { + "vmid": "2102", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-core-2", + "status": "running", + "ip": "192.168.11.212", + "mac": "BC:24:11:03:54:8F", + "config_path": "/etc/pve/nodes/r630-03/lxc/2102.conf" + }, + { + "vmid": "2103", + "type": "lxc", + "node": "r630-01", + "name": "besu-rpc-core-thirdweb", + "status": "running", + "ip": "192.168.11.217", + "mac": "BC:24:11:EE:E0:87", + "config_path": "/etc/pve/nodes/r630-01/lxc/2103.conf" + }, + { + "vmid": "2104", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-core-justin", + "status": "running", + "ip": "192.168.11.222", + "mac": "BC:24:11:0B:DF:78", + "config_path": "/etc/pve/nodes/r630-03/lxc/2104.conf" + }, + { + "vmid": "2105", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-core-jason", + "status": "running", + "ip": "192.168.11.225", + "mac": "BC:24:11:30:A3:49", + "config_path": "/etc/pve/nodes/r630-03/lxc/2105.conf" + }, + { + "vmid": "2201", + "type": "lxc", + "node": "r630-02", + "name": "besu-rpc-public-1", + "status": "running", + "ip": "192.168.11.221", + "mac": "BC:24:11:76:9A:BA", + "config_path": "/etc/pve/nodes/r630-02/lxc/2201.conf" + }, + { + "vmid": "2202", + "type": "lxc", + "node": "r630-02", + "name": "besu-rpc-public-justin", + "status": "running", + "ip": "192.168.11.223", + "mac": "BC:24:11:A5:64:90", + "config_path": "/etc/pve/nodes/r630-02/lxc/2202.conf" + }, + { + "vmid": "2203", + "type": "lxc", + "node": "r630-02", + "name": "besu-rpc-public-jason", + "status": "running", + "ip": "192.168.11.226", + "mac": "BC:24:11:63:E8:5E", + "config_path": "/etc/pve/nodes/r630-02/lxc/2203.conf" + }, + { + "vmid": "2301", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-private-1", + "status": "running", + "ip": "192.168.11.232", + "mac": "BC:24:11:A6:E1:20", + "config_path": "/etc/pve/nodes/r630-03/lxc/2301.conf" + }, + { + "vmid": "2303", + "type": "lxc", + "node": "r630-02", + "name": "besu-rpc-ali-0x8a", + "status": "running", + "ip": "192.168.11.233", + "mac": "BC:24:11:B2:78:33", + "config_path": "/etc/pve/nodes/r630-02/lxc/2303.conf" + }, + { + "vmid": "2304", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-ali-0x1", + "status": "running", + "ip": "192.168.11.234", + "mac": "BC:24:11:61:4D:F6", + "config_path": "/etc/pve/nodes/r630-03/lxc/2304.conf" + }, + { + "vmid": "2305", + "type": "lxc", + "node": "r630-02", + "name": "besu-rpc-luis-0x8a", + "status": "running", + "ip": "192.168.11.235", + "mac": "BC:24:11:7B:2D:6E", + "config_path": "/etc/pve/nodes/r630-02/lxc/2305.conf" + }, + { + "vmid": "2306", + "type": "lxc", + "node": "r630-02", + "name": "besu-rpc-luis-0x1", + "status": "running", + "ip": "192.168.11.236", + "mac": "BC:24:11:6F:68:81", + "config_path": "/etc/pve/nodes/r630-02/lxc/2306.conf" + }, + { + "vmid": "2307", + "type": "lxc", + "node": "r630-02", + "name": "besu-rpc-putu-0x8a", + "status": "running", + "ip": "192.168.11.237", + "mac": "BC:24:11:44:12:E2", + "config_path": "/etc/pve/nodes/r630-02/lxc/2307.conf" + }, + { + "vmid": "2308", + "type": "lxc", + "node": "r630-02", + "name": 
"besu-rpc-putu-0x1", + "status": "running", + "ip": "192.168.11.238", + "mac": "BC:24:11:7F:56:CC", + "config_path": "/etc/pve/nodes/r630-02/lxc/2308.conf" + }, + { + "vmid": "2309", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-private-justin", + "status": "running", + "ip": "192.168.11.224", + "mac": "BC:24:11:5F:BD:A8", + "config_path": "/etc/pve/nodes/r630-03/lxc/2309.conf" + }, + { + "vmid": "2310", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-private-jason", + "status": "running", + "ip": "192.168.11.227", + "mac": "BC:24:11:32:4B:85", + "config_path": "/etc/pve/nodes/r630-03/lxc/2310.conf" + }, + { + "vmid": "2400", + "type": "lxc", + "node": "r630-03", + "name": "thirdweb-rpc-1", + "status": "running", + "ip": "192.168.11.240", + "mac": "BC:24:11:AA:D7:31", + "config_path": "/etc/pve/nodes/r630-03/lxc/2400.conf" + }, + { + "vmid": "2401", + "type": "lxc", + "node": "r630-02", + "name": "besu-rpc-thirdweb-0x8a-1", + "status": "running", + "ip": "192.168.11.241", + "mac": "BC:24:11:39:4C:2C", + "config_path": "/etc/pve/nodes/r630-02/lxc/2401.conf" + }, + { + "vmid": "2402", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-thirdweb-0x8a-2", + "status": "running", + "ip": "192.168.11.242", + "mac": "BC:24:11:AF:52:DC", + "config_path": "/etc/pve/nodes/r630-03/lxc/2402.conf" + }, + { + "vmid": "2403", + "type": "lxc", + "node": "r630-03", + "name": "besu-rpc-thirdweb-0x8a-3", + "status": "running", + "ip": "192.168.11.243", + "mac": "BC:24:11:40:E0:52", + "config_path": "/etc/pve/nodes/r630-03/lxc/2403.conf" + }, + { + "vmid": "2410", + "type": "lxc", + "node": "r630-01", + "name": "info-defi-oracle-web", + "status": "running", + "ip": "192.168.11.218", + "mac": "BC:24:11:1E:05:AF", + "config_path": "/etc/pve/nodes/r630-01/lxc/2410.conf" + }, + { + "vmid": "2421", + "type": "lxc", + "node": "r630-04", + "name": "mev-control-backend", + "status": "running", + "ip": "192.168.11.229", + "mac": "BC:24:11:57:9A:F1", + "config_path": "/etc/pve/nodes/r630-04/lxc/2421.conf" + }, + { + "vmid": "2500", + "type": "lxc", + "node": "r630-01", + "name": "besu-rpc-alltra-1", + "status": "running", + "ip": "192.168.11.172", + "mac": "BC:24:11:51:7E:F6", + "config_path": "/etc/pve/nodes/r630-01/lxc/2500.conf" + }, + { + "vmid": "2501", + "type": "lxc", + "node": "r630-01", + "name": "besu-rpc-alltra-2", + "status": "running", + "ip": "192.168.11.173", + "mac": "BC:24:11:DD:40:6E", + "config_path": "/etc/pve/nodes/r630-01/lxc/2501.conf" + }, + { + "vmid": "2502", + "type": "lxc", + "node": "r630-01", + "name": "besu-rpc-alltra-3", + "status": "running", + "ip": "192.168.11.174", + "mac": "BC:24:11:80:AF:A5", + "config_path": "/etc/pve/nodes/r630-01/lxc/2502.conf" + }, + { + "vmid": "2503", + "type": "lxc", + "node": "r630-01", + "name": "besu-rpc-hybx-1", + "status": "running", + "ip": "192.168.11.246", + "mac": "BC:24:11:36:47:87", + "config_path": "/etc/pve/nodes/r630-01/lxc/2503.conf" + }, + { + "vmid": "2504", + "type": "lxc", + "node": "r630-01", + "name": "besu-rpc-hybx-2", + "status": "running", + "ip": "192.168.11.247", + "mac": "BC:24:11:D4:94:B0", + "config_path": "/etc/pve/nodes/r630-01/lxc/2504.conf" + }, + { + "vmid": "2505", + "type": "lxc", + "node": "r630-01", + "name": "besu-rpc-hybx-3", + "status": "running", + "ip": "192.168.11.248", + "mac": "BC:24:11:98:53:42", + "config_path": "/etc/pve/nodes/r630-01/lxc/2505.conf" + }, + { + "vmid": "3000", + "type": "lxc", + "node": "r630-01", + "name": "ml110", + "status": "running", + "ip": "192.168.11.60", + 
"mac": "BC:24:11:4C:99:5C", + "config_path": "/etc/pve/nodes/r630-01/lxc/3000.conf" + }, + { + "vmid": "3001", + "type": "lxc", + "node": "r630-01", + "name": "ml110", + "status": "running", + "ip": "192.168.11.61", + "mac": "BC:24:11:C5:F0:71", + "config_path": "/etc/pve/nodes/r630-01/lxc/3001.conf" + }, + { + "vmid": "3002", + "type": "lxc", + "node": "r630-01", + "name": "ml110", + "status": "running", + "ip": "192.168.11.62", + "mac": "BC:24:11:C5:2C:34", + "config_path": "/etc/pve/nodes/r630-01/lxc/3002.conf" + }, + { + "vmid": "3003", + "type": "lxc", + "node": "r630-01", + "name": "ml110", + "status": "running", + "ip": "192.168.11.66", + "mac": "BC:24:11:43:AB:31", + "config_path": "/etc/pve/nodes/r630-01/lxc/3003.conf" + }, + { + "vmid": "3500", + "type": "lxc", + "node": "r630-02", + "name": "oracle-publisher-1", + "status": "running", + "ip": "192.168.11.29", + "mac": "BC:24:11:A9:6A:AC", + "config_path": "/etc/pve/nodes/r630-02/lxc/3500.conf" + }, + { + "vmid": "3501", + "type": "lxc", + "node": "r630-02", + "name": "ccip-monitor-1", + "status": "running", + "ip": "192.168.11.28", + "mac": "BC:24:11:DC:02:89", + "config_path": "/etc/pve/nodes/r630-02/lxc/3501.conf" + }, + { + "vmid": "5000", + "type": "lxc", + "node": "r630-02", + "name": "blockscout-1", + "status": "running", + "ip": "192.168.11.140", + "mac": "BC:24:11:3C:58:2B", + "config_path": "/etc/pve/nodes/r630-02/lxc/5000.conf" + }, + { + "vmid": "5010", + "type": "lxc", + "node": "r630-01", + "name": "tsunamiswap", + "status": "running", + "ip": "192.168.11.91", + "mac": "BC:24:11:F4:93:56", + "config_path": "/etc/pve/nodes/r630-01/lxc/5010.conf" + }, + { + "vmid": "5200", + "type": "lxc", + "node": "r630-02", + "name": "cacti-1", + "status": "running", + "ip": "192.168.11.80", + "mac": "BC:24:11:01:5C:BE", + "config_path": "/etc/pve/nodes/r630-02/lxc/5200.conf" + }, + { + "vmid": "5201", + "type": "lxc", + "node": "r630-02", + "name": "cacti-alltra-1", + "status": "running", + "ip": "192.168.11.177", + "mac": "BC:24:11:07:41:D8", + "config_path": "/etc/pve/nodes/r630-02/lxc/5201.conf" + }, + { + "vmid": "5202", + "type": "lxc", + "node": "r630-02", + "name": "cacti-hybx-1", + "status": "running", + "ip": "192.168.11.251", + "mac": "BC:24:11:7E:0D:2A", + "config_path": "/etc/pve/nodes/r630-02/lxc/5202.conf" + }, + { + "vmid": "5700", + "type": "lxc", + "node": "r630-04", + "name": "dev-vm", + "status": "running", + "ip": "192.168.11.59", + "mac": "BC:24:11:A4:44:5C", + "config_path": "/etc/pve/nodes/r630-04/lxc/5700.conf" + }, + { + "vmid": "5701", + "type": "lxc", + "node": "r630-04", + "name": "gitea-runner-1", + "status": "running", + "ip": "192.168.11.65", + "mac": "BC:24:11:B4:B4:A5", + "config_path": "/etc/pve/nodes/r630-04/lxc/5701.conf" + }, + { + "vmid": "5702", + "type": "lxc", + "node": "r630-01", + "name": "ai-inf-1", + "status": "running", + "ip": "192.168.11.82", + "mac": "BC:24:11:24:9D:BB", + "config_path": "/etc/pve/nodes/r630-01/lxc/5702.conf" + }, + { + "vmid": "5705", + "type": "lxc", + "node": "r630-01", + "name": "ai-inf-2", + "status": "running", + "ip": "192.168.11.86", + "mac": "BC:24:11:E2:65:A0", + "config_path": "/etc/pve/nodes/r630-01/lxc/5705.conf" + }, + { + "vmid": "5751", + "type": "lxc", + "node": "r630-02", + "name": "op-stack-deployer-1", + "status": "running", + "ip": "192.168.11.69", + "mac": "BC:24:11:85:50:57", + "config_path": "/etc/pve/nodes/r630-02/lxc/5751.conf" + }, + { + "vmid": "5752", + "type": "lxc", + "node": "r630-02", + "name": "op-stack-ops-1", + "status": 
"running", + "ip": "192.168.11.70", + "mac": "BC:24:11:1A:5D:4F", + "config_path": "/etc/pve/nodes/r630-02/lxc/5752.conf" + }, + { + "vmid": "5800", + "type": "lxc", + "node": "r630-02", + "name": "mifos", + "status": "running", + "ip": "192.168.11.85", + "mac": "BC:24:11:EA:57:8E", + "config_path": "/etc/pve/nodes/r630-02/lxc/5800.conf" + }, + { + "vmid": "5801", + "type": "lxc", + "node": "r630-02", + "name": "dapp-smom", + "status": "running", + "ip": "192.168.11.58", + "mac": "BC:24:11:23:A9:D6", + "config_path": "/etc/pve/nodes/r630-02/lxc/5801.conf" + }, + { + "vmid": "5802", + "type": "lxc", + "node": "r630-02", + "name": "rtgs-scsm-1", + "status": "running", + "ip": "192.168.11.89", + "mac": "BC:24:11:66:0D:82", + "config_path": "/etc/pve/nodes/r630-02/lxc/5802.conf" + }, + { + "vmid": "5803", + "type": "lxc", + "node": "r630-02", + "name": "rtgs-funds-1", + "status": "running", + "ip": "192.168.11.90", + "mac": "BC:24:11:FF:2B:DD", + "config_path": "/etc/pve/nodes/r630-02/lxc/5803.conf" + }, + { + "vmid": "5804", + "type": "lxc", + "node": "r630-02", + "name": "rtgs-xau-1", + "status": "running", + "ip": "192.168.11.92", + "mac": "BC:24:11:F8:6F:94", + "config_path": "/etc/pve/nodes/r630-02/lxc/5804.conf" + }, + { + "vmid": "6000", + "type": "lxc", + "node": "r630-02", + "name": "fabric-1", + "status": "running", + "ip": "192.168.11.113", + "mac": "BC:24:11:87:40:20", + "config_path": "/etc/pve/nodes/r630-02/lxc/6000.conf" + }, + { + "vmid": "6001", + "type": "lxc", + "node": "r630-02", + "name": "fabric-alltra-1", + "status": "running", + "ip": "192.168.11.178", + "mac": "BC:24:11:27:A1:EF", + "config_path": "/etc/pve/nodes/r630-02/lxc/6001.conf" + }, + { + "vmid": "6002", + "type": "lxc", + "node": "r630-02", + "name": "fabric-hybx-1", + "status": "running", + "ip": "192.168.11.252", + "mac": "BC:24:11:8D:3B:D5", + "config_path": "/etc/pve/nodes/r630-02/lxc/6002.conf" + }, + { + "vmid": "6200", + "type": "lxc", + "node": "r630-02", + "name": "firefly-1", + "status": "running", + "ip": "192.168.11.35", + "mac": "BC:24:11:8F:0B:84", + "config_path": "/etc/pve/nodes/r630-02/lxc/6200.conf" + }, + { + "vmid": "6201", + "type": "lxc", + "node": "r630-02", + "name": "firefly-ali-1", + "status": "running", + "ip": "192.168.11.57", + "mac": "BC:24:11:A7:74:23", + "config_path": "/etc/pve/nodes/r630-02/lxc/6201.conf" + }, + { + "vmid": "6202", + "type": "lxc", + "node": "r630-02", + "name": "firefly-alltra-1", + "status": "running", + "ip": "192.168.11.175", + "mac": "BC:24:11:CF:52:A0", + "config_path": "/etc/pve/nodes/r630-02/lxc/6202.conf" + }, + { + "vmid": "6203", + "type": "lxc", + "node": "r630-02", + "name": "firefly-alltra-2", + "status": "running", + "ip": "192.168.11.176", + "mac": "BC:24:11:22:99:F8", + "config_path": "/etc/pve/nodes/r630-02/lxc/6203.conf" + }, + { + "vmid": "6204", + "type": "lxc", + "node": "r630-02", + "name": "firefly-hybx-1", + "status": "running", + "ip": "192.168.11.249", + "mac": "BC:24:11:BC:35:04", + "config_path": "/etc/pve/nodes/r630-02/lxc/6204.conf" + }, + { + "vmid": "6205", + "type": "lxc", + "node": "r630-02", + "name": "firefly-hybx-2", + "status": "running", + "ip": "192.168.11.250", + "mac": "BC:24:11:04:8A:D3", + "config_path": "/etc/pve/nodes/r630-02/lxc/6205.conf" + }, + { + "vmid": "6400", + "type": "lxc", + "node": "r630-04", + "name": "indy-1", + "status": "running", + "ip": "192.168.11.64", + "mac": "BC:24:11:F7:E8:B8", + "config_path": "/etc/pve/nodes/r630-04/lxc/6400.conf" + }, + { + "vmid": "6401", + "type": "lxc", + "node": 
"r630-02", + "name": "indy-alltra-1", + "status": "running", + "ip": "192.168.11.179", + "mac": "BC:24:11:36:AE:0E", + "config_path": "/etc/pve/nodes/r630-02/lxc/6401.conf" + }, + { + "vmid": "6402", + "type": "lxc", + "node": "r630-02", + "name": "indy-hybx-1", + "status": "running", + "ip": "192.168.11.253", + "mac": "BC:24:11:18:1E:D0", + "config_path": "/etc/pve/nodes/r630-02/lxc/6402.conf" + }, + { + "vmid": "6500", + "type": "lxc", + "node": "r630-02", + "name": "aries-1", + "status": "running", + "ip": "192.168.11.88", + "mac": "BC:24:11:B0:3D:CB", + "config_path": "/etc/pve/nodes/r630-02/lxc/6500.conf" + }, + { + "vmid": "6600", + "type": "lxc", + "node": "r630-02", + "name": "caliper-1", + "status": "running", + "ip": "192.168.11.93", + "mac": "BC:24:11:29:2D:8B", + "config_path": "/etc/pve/nodes/r630-02/lxc/6600.conf" + }, + { + "vmid": "7800", + "type": "lxc", + "node": "r630-01", + "name": "sankofa-api-1", + "status": "running", + "ip": "192.168.11.50", + "mac": "BC:24:11:3F:B6:CD", + "config_path": "/etc/pve/nodes/r630-01/lxc/7800.conf" + }, + { + "vmid": "7801", + "type": "lxc", + "node": "r630-01", + "name": "sankofa-portal-1", + "status": "running", + "ip": "192.168.11.51", + "mac": "BC:24:11:0D:FB:EE", + "config_path": "/etc/pve/nodes/r630-01/lxc/7801.conf" + }, + { + "vmid": "7802", + "type": "lxc", + "node": "r630-01", + "name": "sankofa-keycloak-1", + "status": "running", + "ip": "192.168.11.52", + "mac": "BC:24:11:C8:D9:B8", + "config_path": "/etc/pve/nodes/r630-01/lxc/7802.conf" + }, + { + "vmid": "7803", + "type": "lxc", + "node": "r630-01", + "name": "sankofa-postgres-1", + "status": "running", + "ip": "192.168.11.53", + "mac": "BC:24:11:AD:45:64", + "config_path": "/etc/pve/nodes/r630-01/lxc/7803.conf" + }, + { + "vmid": "7804", + "type": "lxc", + "node": "r630-04", + "name": "gov-portals-dev", + "status": "running", + "ip": "192.168.11.54", + "mac": "BC:24:11:4D:99:5B", + "config_path": "/etc/pve/nodes/r630-04/lxc/7804.conf" + }, + { + "vmid": "7805", + "type": "lxc", + "node": "r630-01", + "name": "sankofa-studio", + "status": "running", + "ip": "192.168.11.72", + "mac": "BC:24:11:55:FA:49", + "config_path": "/etc/pve/nodes/r630-01/lxc/7805.conf" + }, + { + "vmid": "7806", + "type": "lxc", + "node": "r630-01", + "name": "sankofa-public-web", + "status": "running", + "ip": "192.168.11.63", + "mac": "BC:24:11:CF:77:7B", + "config_path": "/etc/pve/nodes/r630-01/lxc/7806.conf" + }, + { + "vmid": "7807", + "type": "lxc", + "node": "r630-01", + "name": "cc-phase1-lab", + "status": "running", + "ip": "", + "mac": "BC:24:11:53:E0:B6", + "config_path": "/etc/pve/nodes/r630-01/lxc/7807.conf" + }, + { + "vmid": "7808", + "type": "lxc", + "node": "r630-01", + "name": "cc-phase1-k3s", + "status": "running", + "ip": "", + "mac": "BC:24:11:11:4F:D3", + "config_path": "/etc/pve/nodes/r630-01/lxc/7808.conf" + }, + { + "vmid": "7810", + "type": "lxc", + "node": "r630-02", + "name": "mim-web-1", + "status": "running", + "ip": "192.168.11.37", + "mac": "BC:24:11:00:78:10", + "config_path": "/etc/pve/nodes/r630-02/lxc/7810.conf" + }, + { + "vmid": "7811", + "type": "lxc", + "node": "r630-02", + "name": "mim-api-1", + "status": "running", + "ip": "192.168.11.36", + "mac": "BC:24:11:A9:5C:35", + "config_path": "/etc/pve/nodes/r630-02/lxc/7811.conf" + }, + { + "vmid": "7815", + "type": "lxc", + "node": "r630-02", + "name": "cc-phase1-lab", + "status": "running", + "ip": "192.168.11.75", + "mac": "BC:24:11:E8:A4:D4", + "config_path": "/etc/pve/nodes/r630-02/lxc/7815.conf" + }, + { + 
"vmid": "8604", + "type": "lxc", + "node": "r630-01", + "name": "currencicombo-phoenix-1", + "status": "running", + "ip": "10.160.0.14", + "mac": "BC:24:11:E9:19:A4", + "config_path": "/etc/pve/nodes/r630-01/lxc/8604.conf" + }, + { + "vmid": "8640", + "type": "lxc", + "node": "r630-04", + "name": "vault-phoenix-1", + "status": "running", + "ip": "192.168.11.200", + "mac": "BC:24:11:F2:4F:D4", + "config_path": "/etc/pve/nodes/r630-04/lxc/8640.conf" + }, + { + "vmid": "8641", + "type": "lxc", + "node": "r630-02", + "name": "vault-phoenix-2", + "status": "running", + "ip": "192.168.11.215", + "mac": "BC:24:11:DA:A1:7F", + "config_path": "/etc/pve/nodes/r630-02/lxc/8641.conf" + }, + { + "vmid": "8642", + "type": "lxc", + "node": "r630-04", + "name": "vault-phoenix-3", + "status": "running", + "ip": "192.168.11.202", + "mac": "BC:24:11:E4:BD:63", + "config_path": "/etc/pve/nodes/r630-04/lxc/8642.conf" + }, + { + "vmid": "8811", + "type": "qemu", + "node": "r630-04", + "name": "sankofa-proxmox-mcp", + "status": "running", + "ip": "192.168.11.111", + "mac": "BC:24:11:A9:4D:28", + "config_path": "/etc/pve/nodes/r630-04/qemu-server/8811.conf" + }, + { + "vmid": "8812", + "type": "qemu", + "node": "r630-04", + "name": "operator-services", + "status": "running", + "ip": "192.168.11.112", + "mac": "BC:24:11:3B:B7:0C", + "config_path": "/etc/pve/nodes/r630-04/qemu-server/8812.conf" + }, + { + "vmid": "10000", + "type": "lxc", + "node": "r630-01", + "name": "order-postgres-primary", + "status": "running", + "ip": "192.168.11.44", + "mac": "BC:24:11:35:7B:92", + "config_path": "/etc/pve/nodes/r630-01/lxc/10000.conf" + }, + { + "vmid": "10001", + "type": "lxc", + "node": "r630-01", + "name": "order-postgres-replica", + "status": "running", + "ip": "192.168.11.45", + "mac": "BC:24:11:80:47:EB", + "config_path": "/etc/pve/nodes/r630-01/lxc/10001.conf" + }, + { + "vmid": "10020", + "type": "lxc", + "node": "r630-04", + "name": "order-redis", + "status": "running", + "ip": "192.168.11.38", + "mac": "BC:24:11:92:83:A5", + "config_path": "/etc/pve/nodes/r630-04/lxc/10020.conf" + }, + { + "vmid": "10030", + "type": "lxc", + "node": "r630-01", + "name": "order-identity", + "status": "running", + "ip": "192.168.11.40", + "mac": "BC:24:11:45:33:5A", + "config_path": "/etc/pve/nodes/r630-01/lxc/10030.conf" + }, + { + "vmid": "10040", + "type": "lxc", + "node": "r630-01", + "name": "order-intake", + "status": "running", + "ip": "192.168.11.41", + "mac": "BC:24:11:35:29:44", + "config_path": "/etc/pve/nodes/r630-01/lxc/10040.conf" + }, + { + "vmid": "10050", + "type": "lxc", + "node": "r630-01", + "name": "order-finance", + "status": "running", + "ip": "192.168.11.49", + "mac": "BC:24:11:1D:5C:DA", + "config_path": "/etc/pve/nodes/r630-01/lxc/10050.conf" + }, + { + "vmid": "10060", + "type": "lxc", + "node": "r630-01", + "name": "order-dataroom", + "status": "running", + "ip": "192.168.11.42", + "mac": "BC:24:11:1F:36:6C", + "config_path": "/etc/pve/nodes/r630-01/lxc/10060.conf" + }, + { + "vmid": "10070", + "type": "lxc", + "node": "r630-04", + "name": "order-legal", + "status": "running", + "ip": "192.168.11.87", + "mac": "BC:24:11:BB:F4:C3", + "config_path": "/etc/pve/nodes/r630-04/lxc/10070.conf" + }, + { + "vmid": "10080", + "type": "lxc", + "node": "r630-01", + "name": "order-eresidency", + "status": "running", + "ip": "192.168.11.43", + "mac": "BC:24:11:C3:42:75", + "config_path": "/etc/pve/nodes/r630-01/lxc/10080.conf" + }, + { + "vmid": "10090", + "type": "lxc", + "node": "r630-04", + "name": 
"order-portal-public", + "status": "running", + "ip": "192.168.11.180", + "mac": "BC:24:11:D6:DB:B5", + "config_path": "/etc/pve/nodes/r630-04/lxc/10090.conf" + }, + { + "vmid": "10091", + "type": "lxc", + "node": "r630-04", + "name": "order-portal-internal", + "status": "running", + "ip": "192.168.11.181", + "mac": "BC:24:11:47:A0:35", + "config_path": "/etc/pve/nodes/r630-04/lxc/10091.conf" + }, + { + "vmid": "10092", + "type": "lxc", + "node": "r630-04", + "name": "order-mcp-legal", + "status": "running", + "ip": "192.168.11.182", + "mac": "BC:24:11:DB:87:0C", + "config_path": "/etc/pve/nodes/r630-04/lxc/10092.conf" + }, + { + "vmid": "10100", + "type": "lxc", + "node": "r630-01", + "name": "dbis-postgres", + "status": "running", + "ip": "192.168.11.105", + "mac": "BC:24:11:73:66:08", + "config_path": "/etc/pve/nodes/r630-01/lxc/10100.conf" + }, + { + "vmid": "10101", + "type": "lxc", + "node": "r630-01", + "name": "dbis-postgres-replica", + "status": "running", + "ip": "192.168.11.106", + "mac": "BC:24:11:32:2F:07", + "config_path": "/etc/pve/nodes/r630-01/lxc/10101.conf" + }, + { + "vmid": "10120", + "type": "lxc", + "node": "r630-01", + "name": "dbis-redis", + "status": "running", + "ip": "192.168.11.125", + "mac": "BC:24:11:F0:FE:A2", + "config_path": "/etc/pve/nodes/r630-01/lxc/10120.conf" + }, + { + "vmid": "10130", + "type": "lxc", + "node": "r630-01", + "name": "dbis-frontend", + "status": "running", + "ip": "192.168.11.130", + "mac": "BC:24:11:62:F5:F7", + "config_path": "/etc/pve/nodes/r630-01/lxc/10130.conf" + }, + { + "vmid": "10150", + "type": "lxc", + "node": "r630-01", + "name": "dbis-api-primary", + "status": "running", + "ip": "192.168.11.155", + "mac": "BC:24:11:AA:59:F8", + "config_path": "/etc/pve/nodes/r630-01/lxc/10150.conf" + }, + { + "vmid": "10151", + "type": "lxc", + "node": "r630-01", + "name": "dbis-api-secondary", + "status": "running", + "ip": "192.168.11.156", + "mac": "BC:24:11:7A:D2:A6", + "config_path": "/etc/pve/nodes/r630-01/lxc/10151.conf" + }, + { + "vmid": "10200", + "type": "lxc", + "node": "r630-04", + "name": "order-prometheus", + "status": "running", + "ip": "192.168.11.46", + "mac": "BC:24:11:57:63:08", + "config_path": "/etc/pve/nodes/r630-04/lxc/10200.conf" + }, + { + "vmid": "10201", + "type": "lxc", + "node": "r630-04", + "name": "order-grafana", + "status": "running", + "ip": "192.168.11.47", + "mac": "BC:24:11:AF:10:34", + "config_path": "/etc/pve/nodes/r630-04/lxc/10201.conf" + }, + { + "vmid": "10202", + "type": "lxc", + "node": "r630-01", + "name": "order-opensearch", + "status": "running", + "ip": "192.168.11.48", + "mac": "BC:24:11:A8:8C:F6", + "config_path": "/etc/pve/nodes/r630-01/lxc/10202.conf" + }, + { + "vmid": "10203", + "type": "lxc", + "node": "r630-01", + "name": "omdnl-org-web", + "status": "running", + "ip": "192.168.11.228", + "mac": "BC:24:11:C2:EA:A0", + "config_path": "/etc/pve/nodes/r630-01/lxc/10203.conf" + }, + { + "vmid": "10210", + "type": "lxc", + "node": "r630-04", + "name": "order-haproxy", + "status": "running", + "ip": "192.168.11.39", + "mac": "BC:24:11:3E:9B:A6", + "config_path": "/etc/pve/nodes/r630-04/lxc/10210.conf" + }, + { + "vmid": "10230", + "type": "lxc", + "node": "r630-04", + "name": "order-vault", + "status": "running", + "ip": "192.168.11.55", + "mac": "BC:24:11:EF:4C:E7", + "config_path": "/etc/pve/nodes/r630-04/lxc/10230.conf" + }, + { + "vmid": "10232", + "type": "lxc", + "node": "r630-01", + "name": "CT10232", + "status": "running", + "ip": "192.168.11.56", + "mac": "BC:24:11:A6:74:63", + 
"config_path": "/etc/pve/nodes/r630-01/lxc/10232.conf" + }, + { + "vmid": "10233", + "type": "lxc", + "node": "r630-01", + "name": "npmplus", + "status": "running", + "ip": "192.168.11.166", + "mac": "BC:24:11:6F:EB:A0", + "config_path": "/etc/pve/nodes/r630-01/lxc/10233.conf", + "ips": [ + "192.168.11.166", + "192.168.11.167" + ] + }, + { + "vmid": "10234", + "type": "lxc", + "node": "r630-02", + "name": "npmplus-secondary", + "status": "running", + "ip": "192.168.11.168", + "mac": "BC:24:11:8D:EC:B7", + "config_path": "/etc/pve/nodes/r630-02/lxc/10234.conf" + }, + { + "vmid": "10235", + "type": "lxc", + "node": "r630-01", + "name": "npmplus-alltra-hybx", + "status": "running", + "ip": "192.168.11.169", + "mac": "BC:24:11:97:15:70", + "config_path": "/etc/pve/nodes/r630-01/lxc/10235.conf" + }, + { + "vmid": "10236", + "type": "lxc", + "node": "r630-01", + "name": "npmplus-fourth", + "status": "running", + "ip": "192.168.11.170", + "mac": "BC:24:11:9D:4E:64", + "config_path": "/etc/pve/nodes/r630-01/lxc/10236.conf" + }, + { + "vmid": "10237", + "type": "lxc", + "node": "r630-02", + "name": "npmplus-mifos", + "status": "running", + "ip": "192.168.11.171", + "mac": "BC:24:11:DA:2A:D0", + "config_path": "/etc/pve/nodes/r630-02/lxc/10237.conf" + }, + { + "vmid": "10381", + "type": "lxc", + "node": "r630-03", + "name": "treasury-dealflow", + "status": "running", + "ip": "192.168.11.94", + "mac": "BC:24:11:2A:75:F6", + "config_path": "/etc/pve/nodes/r630-03/lxc/10381.conf" + }, + { + "vmid": "10900", + "type": "qemu", + "node": "r630-01", + "name": "mailcow-dbis", + "status": "running", + "ip": "192.168.11.115", + "mac": "BC:24:11:89:3B:9D", + "config_path": "/etc/pve/nodes/r630-01/qemu-server/10900.conf" + } + ] +} \ No newline at end of file diff --git a/scripts/comprehensive-proxmox-inventory.py b/scripts/comprehensive-proxmox-inventory.py index 2a438ae0..9f283209 100755 --- a/scripts/comprehensive-proxmox-inventory.py +++ b/scripts/comprehensive-proxmox-inventory.py @@ -12,6 +12,10 @@ from pathlib import Path from typing import Dict, List, Optional, Any from collections import defaultdict +_SCRIPT_DIR = Path(__file__).resolve().parent +sys.path.insert(0, str(_SCRIPT_DIR / "lib")) +from proxmox_guest_lan_ips import parse_guest_network_from_config + # Proxmox Hosts PROXMOX_HOSTS = { "ml110": "192.168.11.10", @@ -121,22 +125,17 @@ def get_vm_config(host: str, node: str, vmid: str, vm_type: str) -> Dict[str, An def get_vm_ip(host: str, node: str, vmid: str, vm_type: str) -> Optional[str]: """Get VM IP address""" + config = get_vm_config(host, node, vmid, vm_type) + static_ip = parse_guest_network_from_config(config).primary_ip + if static_ip: + return static_ip + if vm_type == 'lxc': - # For LXC, get IP from config or running container - config = get_vm_config(host, node, vmid, vm_type) - net_config = config.get('net0', '') - if 'ip=' in net_config: - ip_part = net_config.split('ip=')[1].split(',')[0].split('/')[0] - if ip_part and ip_part not in ['dhcp', 'auto']: - return ip_part - - # Try to get IP from running container if config.get('status') == 'running': ip = run_ssh_command(host, f"pct exec {vmid} -- hostname -I 2>/dev/null | awk '{{print $1}}'") if ip and not ip.startswith('127.'): return ip else: - # For QEMU, try agent command = f"pvesh get /nodes/{node}/qemu/{vmid}/agent/network-get-interfaces --output-format json" output = run_ssh_command(host, command) if output: @@ -150,7 +149,7 @@ def get_vm_ip(host: str, node: str, vmid: str, vm_type: str) -> Optional[str]: return 
ip_info['ip-address'] except json.JSONDecodeError: pass - + return None def get_vm_hostname(host: str, node: str, vmid: str, vm_type: str) -> Optional[str]: diff --git a/scripts/it-ops/compute_ipam_drift.py b/scripts/it-ops/compute_ipam_drift.py new file mode 100755 index 00000000..e2415b52 --- /dev/null +++ b/scripts/it-ops/compute_ipam_drift.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +"""Merge live JSON with config/ip-addresses.conf; write live_inventory.json + drift.json.""" +from __future__ import annotations + +import argparse +import json +import re +import sys +from pathlib import Path + +IPV4_RE = re.compile( + r"(?<![\d.])(\d{1,3}(?:\.\d{1,3}){3})(?![\d.])" +) + +MD_VMID_IP_ROW = re.compile( + r"^\|\s*(\d+)\s*\|.*?(\d{1,3}(?:\.\d{1,3}){3})" +) + + +def is_lan_11(ip: str) -> bool: + return ip.startswith("192.168.11.") + + +def parse_all_vmids_markdown(path: Path) -> tuple[set[str], dict[str, str]]: + """Extract declared LAN IPs and vmid->ip from ALL_VMIDS pipe tables.""" + ips: set[str] = set() + vmid_to_ip: dict[str, str] = {} + if not path.is_file(): + return ips, vmid_to_ip + for line in path.read_text(encoding="utf-8", errors="replace").splitlines(): + m = MD_VMID_IP_ROW.match(line.strip()) + if not m: + continue + vmid, ip = m.group(1), m.group(2) + if is_lan_11(ip): + ips.add(ip) + vmid_to_ip[vmid] = ip + return ips, vmid_to_ip + + +def parse_ip_addresses_conf(path: Path) -> tuple[dict[str, str], set[str]]: + var_map: dict[str, str] = {} + all_ips: set[str] = set() + if not path.is_file(): + return var_map, all_ips + for line in path.read_text(encoding="utf-8", errors="replace").splitlines(): + s = line.strip() + if not s or s.startswith("#") or "=" not in s: + continue + key, _, val = s.partition("=") + key = key.strip() + val = val.strip() + if val.startswith('"') and val.endswith('"'): + val = val[1:-1] + elif val.startswith("'") and val.endswith("'"): + val = val[1:-1] + var_map[key] = val + for m in IPV4_RE.findall(val): + all_ips.add(m) + return var_map, all_ips + + +def hypervisor_related_keys(var_map: dict[str, str]) -> set[str]: + keys = set() + for k in var_map: + ku = k.upper() + if any( + x in ku + for x in ( + "PROXMOX_HOST", + "PROXMOX_ML110", + "PROXMOX_R630", + "PROXMOX_R750", + "WAN_AGGREGATOR", + "NETWORK_GATEWAY", + "UDM_PRO", + "PUBLIC_IP_GATEWAY", + "PUBLIC_IP_ER605", + ) + ): + keys.add(k) + return keys + + +def main() -> None: + ap = argparse.ArgumentParser() + ap.add_argument("--live", type=Path, help="live JSON file (default stdin)") + ap.add_argument( + "--ip-conf", + type=Path, + default=Path("config/ip-addresses.conf"), + help="path to ip-addresses.conf", + ) + ap.add_argument("--out-dir", type=Path, required=True) + ap.add_argument( + "--all-vmids-md", + type=Path, + default=None, + help="optional ALL_VMIDS_ENDPOINTS.md for declared VMID/IP tables", + ) + args = ap.parse_args() + + if args.live: + live_raw = args.live.read_text(encoding="utf-8") + else: + live_raw = sys.stdin.read() + + try: + live = json.loads(live_raw) + except json.JSONDecodeError as e: + print(f"Invalid live JSON: {e}", file=sys.stderr) + sys.exit(1) + + guests = live.get("guests") or [] + var_map, conf_ips = parse_ip_addresses_conf(args.ip_conf) + doc_ips: set[str] = set() + vmid_to_ip_doc: dict[str, str] = {} + if args.all_vmids_md: + doc_ips, vmid_to_ip_doc = parse_all_vmids_markdown(args.all_vmids_md) + + declared_union = conf_ips | doc_ips + hyp_keys = hypervisor_related_keys(var_map) + hyp_ips: set[str] = set() + for k in hyp_keys: + if k not in var_map: + continue + for m in IPV4_RE.findall(var_map[k]): + hyp_ips.add(m) + + ip_to_rows: dict[str, list[dict]] = {} + vmid_to_ip_live: dict[str, str] = {} + live_vmids_all: set[str]
= set() + for g in guests: + ip = (g.get("ip") or "").strip() + vmid = str(g.get("vmid", "")).strip() + if vmid: + live_vmids_all.add(vmid) + if ip: + ip_to_rows.setdefault(ip, []).append(g) + if vmid and ip: + vmid_to_ip_live[vmid] = ip + + doc_vmids = set(vmid_to_ip_doc.keys()) + vmids_in_all_vmids_doc_not_on_cluster = sorted( + doc_vmids - live_vmids_all, key=lambda x: int(x) if x.isdigit() else 0 + ) + only_live_not_in_doc = live_vmids_all - doc_vmids + vmids_on_cluster_not_in_all_vmids_table_count = len(only_live_not_in_doc) + vmids_on_cluster_not_in_all_vmids_table_sample = sorted( + only_live_not_in_doc, key=lambda x: int(x) if x.isdigit() else 0 + )[:100] + + ip_to_vmids: dict[str, list[str]] = { + ip: [str(r.get("vmid", "") or "?").strip() or "?" for r in rows] + for ip, rows in ip_to_rows.items() + } + + duplicate_ips: dict[str, list[str]] = {} + same_name_duplicate_ip: dict[str, list[str]] = {} + for ip, rows in ip_to_rows.items(): + if len(rows) < 2: + continue + names = {(str(r.get("name") or "").strip().lower()) for r in rows} + names.discard("") + vmids = [str(r.get("vmid", "") or "?").strip() or "?" for r in rows] + if len(names) == 1: + # Same guest name on multiple VMIDs (e.g. clone/migration) — informational only. + same_name_duplicate_ip[ip] = sorted(vmids, key=lambda x: int(x) if x.isdigit() else 0) + else: + duplicate_ips[ip] = vmids + + guest_ip_set = set(ip_to_vmids.keys()) + + conf_only = sorted(conf_ips - guest_ip_set - hyp_ips) + live_only_legacy = sorted(guest_ip_set - conf_ips) + + declared_lan11 = {ip for ip in declared_union if is_lan_11(ip)} + guest_lan11 = {ip for ip in guest_ip_set if is_lan_11(ip)} + guest_lan_not_declared = sorted( + guest_lan11 - declared_union - hyp_ips + ) + declared_lan11_not_on_guests = sorted( + declared_lan11 - guest_ip_set - hyp_ips + ) + + vmid_ip_mismatch: list[dict[str, str]] = [] + for vmid, doc_ip in vmid_to_ip_doc.items(): + lip = vmid_to_ip_live.get(vmid) + if lip and doc_ip and lip != doc_ip: + vmid_ip_mismatch.append( + {"vmid": vmid, "live_ip": lip, "all_vmids_doc_ip": doc_ip} + ) + + drift = { + "collected_at": live.get("collected_at"), + "guest_count": len(guests), + "duplicate_ips": duplicate_ips, + "same_name_duplicate_ip_guests": same_name_duplicate_ip, + "guest_ips_not_in_ip_addresses_conf": live_only_legacy, + "ip_addresses_conf_ips_not_on_guests": conf_only, + "guest_lan_ips_not_in_declared_sources": guest_lan_not_declared, + "declared_lan11_ips_not_on_live_guests": declared_lan11_not_on_guests, + "vmid_ip_mismatch_live_vs_all_vmids_doc": vmid_ip_mismatch, + "vmids_in_all_vmids_doc_not_on_cluster": vmids_in_all_vmids_doc_not_on_cluster, + "vmids_on_cluster_not_in_all_vmids_table": { + "count": vmids_on_cluster_not_in_all_vmids_table_count, + "sample_vmids": vmids_on_cluster_not_in_all_vmids_table_sample, + "note": "ALL_VMIDS_ENDPOINTS pipe tables do not list every guest; large count is normal.", + }, + "hypervisor_and_infra_ips_excluded_from_guest_match": sorted(hyp_ips), + "declared_sources": { + "ip_addresses_conf_ipv4_count": len(conf_ips), + "all_vmids_md_lan11_count": len(doc_ips), + "all_vmids_md_row_count": len(doc_vmids), + }, + "notes": [], + } + if live.get("error"): + drift["notes"].append(str(live["error"])) + if same_name_duplicate_ip: + drift["notes"].append( + "same_name_duplicate_ip_guests: multiple VMIDs share an IP but identical " + "guest name — resolve duplicate CTs/VMs in Proxmox; drift exit code not raised." 
+ ) + + inv_out = { + "collected_at": live.get("collected_at"), + "source": "proxmox_cluster_pvesh_plus_config", + "guests": guests, + } + neigh = live.get("ip_neigh_vmbr0_sample") + if isinstance(neigh, dict): + inv_out["ip_neigh_vmbr0_sample"] = neigh + + args.out_dir.mkdir(parents=True, exist_ok=True) + (args.out_dir / "live_inventory.json").write_text( + json.dumps(inv_out, indent=2), encoding="utf-8" + ) + (args.out_dir / "drift.json").write_text( + json.dumps(drift, indent=2), encoding="utf-8" + ) + print(f"Wrote {args.out_dir / 'live_inventory.json'}") + print(f"Wrote {args.out_dir / 'drift.json'}") + # Exit 2 only when the same LAN IP is claimed by guests with different names + # (likely address conflict). Same-name clones are in same_name_duplicate_ip_guests only. + sys.exit(2 if duplicate_ips else 0) + + +if __name__ == "__main__": + main() diff --git a/scripts/it-ops/export-live-inventory-and-drift.sh b/scripts/it-ops/export-live-inventory-and-drift.sh new file mode 100755 index 00000000..24701db5 --- /dev/null +++ b/scripts/it-ops/export-live-inventory-and-drift.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# Live Proxmox guest inventory + drift vs config/ip-addresses.conf. +# Usage: bash scripts/it-ops/export-live-inventory-and-drift.sh +# Requires: SSH key root@SEED, python3 locally and on PVE. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +# shellcheck source=/dev/null +source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true +SEED="${SEED_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}" +OUT_DIR="${OUT_DIR:-${PROJECT_ROOT}/reports/status}" +TS="$(date +%Y%m%d_%H%M%S)" +TMP="${TMPDIR:-/tmp}/live_inv_${TS}.json" +PY="${SCRIPT_DIR}/lib/collect_inventory_remote.py" +LAN_IPS_PY="${PROJECT_ROOT}/scripts/lib/proxmox_guest_lan_ips.py" + +mkdir -p "$OUT_DIR" + +stub_unreachable() { + python3 - <<'PY' +import json +from datetime import datetime, timezone +print(json.dumps({ + "collected_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"), + "error": "seed_unreachable", + "guests": [], +}, indent=2)) +PY +} + +if ! ping -c1 -W2 "$SEED" >/dev/null 2>&1; then + stub_unreachable >"$TMP" +else + REMOTE_DIR="$(ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no \ + "root@${SEED}" 'mktemp -d /tmp/pve-inv-collect.XXXXXX')" + cleanup_remote() { + ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no \ + "root@${SEED}" "rm -rf '${REMOTE_DIR}'" 2>/dev/null || true + } + trap cleanup_remote EXIT + scp -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no \ + "$LAN_IPS_PY" "$PY" "root@${SEED}:${REMOTE_DIR}/" >/dev/null + REMOTE_ENV=() + case "${IT_COLLECT_IP_NEIGH:-}" in + 1|yes|true|TRUE|Yes) REMOTE_ENV+=(IT_COLLECT_IP_NEIGH=1) ;; + esac + if ! ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no \ + "root@${SEED}" \ + "PYTHONPATH='${REMOTE_DIR}' ${REMOTE_ENV[*]} python3 '${REMOTE_DIR}/collect_inventory_remote.py'" \ + >"$TMP" 2>/dev/null; then + stub_unreachable >"$TMP" + fi + trap - EXIT + cleanup_remote +fi + +set +e +python3 "${SCRIPT_DIR}/compute_ipam_drift.py" --live "$TMP" \ + --ip-conf "${PROJECT_ROOT}/config/ip-addresses.conf" \ + --all-vmids-md "${PROJECT_ROOT}/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md" \ + --out-dir "$OUT_DIR" +DRIFT_RC=$? 
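+# DRIFT_RC captures compute_ipam_drift.py's exit code (2 = duplicate guest LAN IPs) so the timestamped copies below are still written before the code is propagated at exit.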
+set -e + +cp -f "$OUT_DIR/live_inventory.json" "${OUT_DIR}/live_inventory_${TS}.json" 2>/dev/null || true +cp -f "$OUT_DIR/drift.json" "${OUT_DIR}/drift_${TS}.json" 2>/dev/null || true +rm -f "$TMP" +if [[ -n "${IT_BFF_SNAPSHOT_DB:-}" ]]; then + python3 "${SCRIPT_DIR}/persist-it-snapshot-sqlite.py" "$IT_BFF_SNAPSHOT_DB" "$OUT_DIR" "${DRIFT_RC}" 2>/dev/null || true +fi +echo "Latest: ${OUT_DIR}/live_inventory.json , ${OUT_DIR}/drift.json" +exit "${DRIFT_RC}" diff --git a/scripts/it-ops/lib/collect_inventory_remote.py b/scripts/it-ops/lib/collect_inventory_remote.py new file mode 100755 index 00000000..c61f4b48 --- /dev/null +++ b/scripts/it-ops/lib/collect_inventory_remote.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +"""Run ON a Proxmox cluster node (as root). Stdout: JSON live guest inventory.""" +from __future__ import annotations + +import json +import os +import subprocess +import sys +from datetime import datetime, timezone +from pathlib import Path + +_SCRIPT_DIR = Path(__file__).resolve().parent + + +def _parser_search_paths() -> list[Path]: + paths: list[Path] = [] + for entry in os.environ.get("PYTHONPATH", "").split(":"): + if entry: + paths.append(Path(entry)) + paths.append(_SCRIPT_DIR) + for parent in _SCRIPT_DIR.parents: + paths.append(parent / "lib") + paths.append(parent / "scripts" / "lib") + return paths + + +for _path in _parser_search_paths(): + if (_path / "proxmox_guest_lan_ips.py").is_file(): + sys.path.insert(0, str(_path)) + break + +from proxmox_guest_lan_ips import ( # noqa: E402 + parse_guest_network_from_conf_text, + parse_guest_network_from_config, +) + + +def _run(cmd: list[str]) -> str: + return subprocess.check_output(cmd, text=True, stderr=subprocess.DEVNULL) + + +def _read_config(path: str) -> str: + try: + with open(path, encoding="utf-8", errors="replace") as f: + return f.read() + except OSError: + return "" + + +def _guest_network( + guest_type: str, node: str, vmid_s: str, body: str +) -> tuple[str, str, tuple[str, ...]]: + if body.strip(): + net = parse_guest_network_from_conf_text(body) + else: + net = parse_guest_network_from_config({}) + if not net.ips: + try: + cfg_raw = _run( + [ + "pvesh", + "get", + f"/nodes/{node}/{guest_type}/{vmid_s}/config", + "--output-format", + "json", + ] + ) + net = parse_guest_network_from_config(json.loads(cfg_raw)) + except (subprocess.CalledProcessError, json.JSONDecodeError, OSError): + pass + return net.primary_ip, (net.macs[0] if net.macs else ""), net.ips + + +def main() -> None: + collected_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + try: + raw = _run( + ["pvesh", "get", "/cluster/resources", "--output-format", "json"] + ) + resources = json.loads(raw) + except (subprocess.CalledProcessError, json.JSONDecodeError) as e: + json.dump( + { + "collected_at": collected_at, + "error": f"pvesh_cluster_resources_failed: {e}", + "guests": [], + }, + sys.stdout, + indent=2, + ) + return + + guests: list[dict] = [] + for r in resources: + t = r.get("type") + if t not in ("lxc", "qemu"): + continue + vmid = r.get("vmid") + node = r.get("node") + if vmid is None or not node: + continue + vmid_s = str(vmid) + name = r.get("name") or "" + status = r.get("status") or "" + + if t == "lxc": + cfg_path = f"/etc/pve/nodes/{node}/lxc/{vmid_s}.conf" + else: + cfg_path = f"/etc/pve/nodes/{node}/qemu-server/{vmid_s}.conf" + + body = _read_config(cfg_path) + ip, mac, ips = _guest_network(t, str(node), vmid_s, body) + + guest: dict = { + "vmid": vmid_s, + "type": t, + "node": str(node), + "name": name, + 
"status": status, + "ip": ip, + "mac": mac, + "config_path": cfg_path, + } + if len(ips) > 1: + guest["ips"] = list(ips) + guests.append(guest) + + out: dict = { + "collected_at": collected_at, + "source": "proxmox_cluster_pvesh_plus_config", + "guests": sorted(guests, key=lambda g: int(g["vmid"])), + } + + if os.environ.get("IT_COLLECT_IP_NEIGH", "").strip().lower() in ( + "1", + "yes", + "true", + ): + neigh_lines: list[str] = [] + try: + raw_neigh = subprocess.check_output( + ["ip", "-4", "neigh", "show", "dev", "vmbr0"], + text=True, + stderr=subprocess.DEVNULL, + timeout=30, + ) + neigh_lines = [ + ln.strip() for ln in raw_neigh.splitlines() if ln.strip() + ][:500] + except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError): + neigh_lines = [] + out["ip_neigh_vmbr0_sample"] = { + "collected_at": collected_at, + "line_count": len(neigh_lines), + "lines": neigh_lines, + } + + json.dump(out, sys.stdout, indent=2) + + +if __name__ == "__main__": + main() diff --git a/scripts/lib/proxmox_guest_lan_ips.py b/scripts/lib/proxmox_guest_lan_ips.py new file mode 100644 index 00000000..54ebc5fa --- /dev/null +++ b/scripts/lib/proxmox_guest_lan_ips.py @@ -0,0 +1,113 @@ +"""Parse static LAN IPv4/MAC from Proxmox LXC/QEMU guest config (net* / ipconfig*).""" +from __future__ import annotations + +import re +from dataclasses import dataclass + +_IP_VALUE_RE = re.compile( + r"(?:^|[,=])ip=([0-9]{1,3}(?:\.[0-9]{1,3}){3})(?:/|,|$)", + re.IGNORECASE, +) +_HWADDR_RE = re.compile(r"hwaddr=([0-9A-Fa-f:]+)", re.IGNORECASE) +_VIRTIO_MAC_RE = re.compile( + r"(?:^|[,=])virtio=([0-9A-Fa-f:]+)(?:,|$)", + re.IGNORECASE, +) +_NIC_INDEX_RE = re.compile(r"^(net|ipconfig)(\d+)$", re.IGNORECASE) + + +@dataclass(frozen=True) +class GuestLanNetwork: + ips: tuple[str, ...] + macs: tuple[str, ...] 
+ primary_lan11: str | None + + @property + def primary_ip(self) -> str: + if self.primary_lan11: + return self.primary_lan11 + return self.ips[0] if self.ips else "" + + +def _nic_index(key: str) -> tuple[str, int] | None: + m = _NIC_INDEX_RE.match(key) + if not m: + return None + return m.group(1).lower(), int(m.group(2)) + + +def _extract_ipv4(value: str) -> str | None: + m = _IP_VALUE_RE.search(value) + if not m: + return None + ip = m.group(1) + if ip in ("dhcp", "auto"): + return None + return ip + + +def _extract_mac(value: str) -> str | None: + m = _HWADDR_RE.search(value) + if m: + return m.group(1) + m = _VIRTIO_MAC_RE.search(value) + if m: + return m.group(1) + return None + + +def _ordered_nic_keys(keys: list[str]) -> list[str]: + indexed: list[tuple[int, int, str]] = [] + for key in keys: + parsed = _nic_index(key) + if not parsed: + continue + kind, idx = parsed + kind_order = 0 if kind == "ipconfig" else 1 + indexed.append((idx, kind_order, key)) + indexed.sort() + return [key for _, _, key in indexed] + + +def parse_guest_network_from_config(config: dict[str, object]) -> GuestLanNetwork: + """Return static IPv4/MAC from pvesh config dict or parsed .conf key/value map.""" + ips: list[str] = [] + macs: list[str] = [] + for key in _ordered_nic_keys(list(config.keys())): + raw = config.get(key) + if not isinstance(raw, str): + continue + kind, _ = _nic_index(key) or ("", 0) + if kind == "ipconfig": + ip = _extract_ipv4(raw) + if ip: + ips.append(ip) + continue + if kind == "net": + ip = _extract_ipv4(raw) + if ip: + ips.append(ip) + mac = _extract_mac(raw) + if mac: + macs.append(mac) + dedup_ips = tuple(dict.fromkeys(ips)) + dedup_macs = tuple(dict.fromkeys(macs)) + primary_lan11 = next((ip for ip in dedup_ips if ip.startswith("192.168.11.")), None) + return GuestLanNetwork( + ips=dedup_ips, + macs=dedup_macs, + primary_lan11=primary_lan11, + ) + + +def parse_guest_network_from_conf_text(body: str) -> GuestLanNetwork: + config: dict[str, str] = {} + for line in body.splitlines(): + if ":" not in line: + continue + key, value = line.split(":", 1) + key = key.strip() + if not key: + continue + config[key] = value.strip() + return parse_guest_network_from_config(config) diff --git a/scripts/recreate-containers-privileged-and-complete-all.sh b/scripts/recreate-containers-privileged-and-complete-all.sh index 479fb151..bf4a17f9 100644 --- a/scripts/recreate-containers-privileged-and-complete-all.sh +++ b/scripts/recreate-containers-privileged-and-complete-all.sh @@ -150,9 +150,9 @@ declare -A CONTAINERS=( ["10060"]="order-dataroom:${IP_SERVICE_42:-${IP_SERVICE_42:-${IP_SERVICE_42:-192.168.11.42}}}:2048:2:20" ["10070"]="order-legal:${IP_ORDER_LEGAL:-192.168.11.87}:2048:2:20" ["10080"]="order-eresidency:${IP_SERVICE_43:-${IP_SERVICE_43:-${IP_SERVICE_43:-192.168.11.43}}}:2048:2:20" - ["10090"]="order-portal-public:${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-192.168.11.36}}}}}}:2048:2:20" - ["10091"]="order-portal-internal:${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-192.168.11.35}}}}}}:2048:2:20" - ["10092"]="order-mcp-legal:${IP_MIM_WEB:-192.168.11.37}:2048:2:20" + ["10090"]="order-portal-public:${ORDER_PORTAL_PUBLIC_IP:-192.168.11.180}:2048:2:20" + ["10091"]="order-portal-internal:${ORDER_PORTAL_INTERNAL_IP:-192.168.11.181}:2048:2:20" + ["10092"]="order-mcp-legal:${ORDER_MCP_LEGAL_IP:-192.168.11.182}:2048:2:20" 
["10100"]="dbis-postgres-primary:${PROXMOX_HOST_ML110}5:4096:4:50" ["10101"]="dbis-postgres-replica-1:${PROXMOX_HOST_ML110}6:4096:4:50" ["10120"]="dbis-redis:${PROXMOX_HOST_R630_02}0:2048:2:20"