Compare commits

...

7 Commits

Author SHA1 Message Date
Devin AI
ad94815c9d chore: drop tracked local-machine artifacts (Phase 4)
Some checks failed
AI Code Review / claude-review (pull_request) Failing after 48s
Remove 1543 files that were committed from developer machines before
matching .gitignore rules were added. All patterns involved are already
ignored; this just cleans up the historical tracking.

- venv/                     (1539 files, Python virtualenv)
- __pycache__/              (1 file — root; all 706 under venv/ covered above)
- home/intlc/projects/...    (1 file — stale copy of multi-chain-execution/src/api/mirror-routes.ts;
                              the canonical in-repo version is newer and has comments)
- .phase1-event-status       (ephemeral phase marker)
- .gitignore.backup.20260103_171034 (pre-cleanup .gitignore snapshot)

Also add '/home/' to .gitignore so future operator-home paste captures
don't get re-introduced.

No working code changed — every deletion is of a build artifact, virtualenv
content, or stale duplicate.

Tracking: #1
Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
2026-04-18 20:05:05 +00:00
defiQUG
d63efcb315 fix(ops): map dev VM 5700 to r630-04; add phoenix-deploy-api deploy script
All checks were successful
Deploy to Phoenix / deploy (push) Successful in 9s
Sync get_host_for_vmid with live placement for VMID 5700 (dev-vm). Add
deploy-phoenix-deploy-api-to-dev-vm.sh for pct-based install to CT 5700.

Made-with: Cursor
2026-04-17 04:51:47 -07:00
1892827711 chore: merge upstream sync — Mission Control launchpad, runbooks, ProxmoxVE React 19 build fix
All checks were successful
Deploy to Phoenix / deploy (push) Successful in 6s
- Merge origin/main into master (already applied).
- Regenerate doc-manifest.json for new RUNBOOK markdown (60 entries).
- Launchpad: E2E endpoints doc, docs.d-bis.org, Gitea, Chain 138 dapp + env overrides.
- ProxmoxVE submodule: fix react-syntax-highlighter JSX types for Next 15 / React 19.

Made-with: Cursor
2026-04-07 16:28:44 +08:00
820174d556 merge: sync local master with origin/main 2026-04-07 16:07:16 +08:00
252b766a53 feat(mission-control): sync launchpad and runbooks with upstream main
- Merge-aligned doc-manifest regeneration (58 doc-derived runbooks)

- Launchpad: Phoenix Deploy API, Meta testnet 2138 runbook, aggregator route matrix

- Curated runbook: check-chain138-rpc-health.sh; README env vars; catalog test threshold

Made-with: Cursor
2026-03-29 10:13:59 +08:00
fa5de3ba01 merge origin/main: sync upstream with Mission Control 2026-03-29 10:11:51 +08:00
18767b7d8b feat: add Mission Control operator console and workspace wiring
- New mission-control Next.js app: runbook catalog, GO execution, SSE stream, audit ZIP export

- Generated doc-manifest from docs runbooks; curated JSON specs; health-check script

- pnpm workspace package, root scripts, README updates

- Resilience: Windows-safe path checks, optional MISSION_CONTROL_PROJECT_ROOT fallback, system fonts

- Bump mcp-proxmox submodule to tracked main

Made-with: Cursor
2026-03-28 14:50:11 +08:00
1605 changed files with 9987 additions and 315184 deletions

4
.gitignore vendored
View File

@@ -54,6 +54,10 @@ venv/
__pycache__/
*.pyc
# Stray operator-home paste captures (never commit files that were dragged in
# from somebody's ~/projects/... path — place them in the real tree instead)
/home/
# CoinGecko/CMC token logos (generated by prepare-token-logos-512x512.sh)
docs/04-configuration/coingecko/logos/*.png

View File

@@ -1,37 +0,0 @@
# Dependencies
node_modules/
.pnpm-store/
# Package manager lock files (using pnpm as default)
package-lock.json
yarn.lock
# Environment files
.env
.env.local
.env.*.local
# Logs
*.log
logs/
# OS files
.DS_Store
Thumbs.db
# IDE files
.vscode/
.idea/
*.swp
*.swo
*~
# Build outputs
dist/
build/
.next/
out/
# Temporary files
*.tmp
*.temp

View File

@@ -1 +0,0 @@
EVENT_STATUS=NOT_FOUND

View File

@@ -90,6 +90,14 @@ From the root directory, you can run:
- `pnpm frontend:build` - Build the ProxmoxVE frontend for production
- `pnpm frontend:start` - Start the production frontend server
### Mission Control (unified operator console)
- `pnpm mission-control:dev` - Next.js console on **http://localhost:3010** (launchpad + guided runbooks + live run trace + audit ZIP)
- `pnpm mission-control:build` / `pnpm mission-control:start` - Production build and server
- `pnpm mission-control:test` - Executor smoke test (real allowlisted child process)
See [mission-control/README.md](mission-control/README.md) and [mission-control/TIMELINE.md](mission-control/TIMELINE.md).
### Testing
- `pnpm test` - Run tests (if available)

Binary file not shown.

View File

@@ -1,78 +0,0 @@
import { Router, Request, Response } from 'express';
import { v4 as uuidv4 } from 'uuid';
import { buildCommitment, type CommitmentLeaf } from '../mirroring/merkle-commitment.js';
import { saveCommit, getCommit, getProof } from '../mirroring/mirror-store.js';

// Mirror commitment API:
//   POST /v1/mirror/commit            — build + persist a Merkle commitment
//   GET  /v1/mirror/commits/:commitId — fetch a stored commitment
//   GET  /v1/mirror/proof             — fetch an inclusion proof for a tx
const router = Router();

/** Shape of the POST /v1/mirror/commit request body. */
interface CommitRequestBody {
  chain_id: number;
  leaves: CommitmentLeaf[];
  uri?: string;
}

router.post('/v1/mirror/commit', (req: Request, res: Response) => {
  try {
    const payload = req.body as CommitRequestBody;
    const { chain_id, leaves } = payload;
    const uri = payload.uri ?? '';

    // Both a chain id and a non-empty leaf set are mandatory.
    if (!leaves?.length || chain_id == null) {
      return res.status(400).json({ error: 'chain_id and leaves required' });
    }

    const commitment = buildCommitment(leaves, chain_id);
    const commitId = 'commit-' + uuidv4();

    // Index leaves by lowercased tx hash so proofs can be looked up later.
    const leavesByTxHash = new Map<string, { leafIndex: number; leafData: unknown }>();
    for (const [leafIndex, leaf] of leaves.entries()) {
      leavesByTxHash.set(leaf.txHash.toLowerCase(), { leafIndex, leafData: leaf });
    }

    saveCommit({
      commitId,
      chainId: chain_id,
      startBlock: commitment.startBlock,
      endBlock: commitment.endBlock,
      root: commitment.root,
      uri,
      timestamp: Math.floor(Date.now() / 1000),
      leafHashes: commitment.leafHashes,
      leavesByTxHash,
      publicChainTxHashes: [],
      createdAt: new Date().toISOString(),
    });

    res.status(201).json({
      commit_id: commitId,
      root: commitment.root,
      start_block: commitment.startBlock,
      end_block: commitment.endBlock,
      chain_id: commitment.chainId,
      schema_version: commitment.schemaVersion,
      leaf_count: commitment.leafCount,
    });
  } catch (e) {
    res.status(400).json({ error: e instanceof Error ? e.message : 'Bad request' });
  }
});

router.get('/v1/mirror/commits/:commitId', (req: Request, res: Response) => {
  const record = getCommit(req.params.commitId);
  if (!record) {
    return res.status(404).json({ error: 'Commit not found' });
  }
  res.json({
    commit_id: record.commitId,
    chain_id: record.chainId,
    start_block: record.startBlock,
    end_block: record.endBlock,
    root: record.root,
    uri: record.uri,
    timestamp: record.timestamp,
    leaf_count: record.leafHashes.length,
    public_chain_tx_hashes: record.publicChainTxHashes,
    created_at: record.createdAt,
  });
});

router.get('/v1/mirror/proof', (req: Request, res: Response) => {
  const chainId = parseInt(req.query.chain_id as string, 10);
  const txHash = req.query.tx_hash as string;

  // Both query params must be present and chain_id must parse as a number.
  if (isNaN(chainId) || !txHash) {
    return res.status(400).json({ error: 'chain_id and tx_hash query params required' });
  }

  const proof = getProof(chainId, txHash);
  if (!proof) {
    return res.status(404).json({ error: 'No proof found for this tx' });
  }
  res.json(proof);
});

export default router;

View File

@@ -0,0 +1,4 @@
{
"extends": "next/core-web-vitals",
"root": true
}

6
mission-control/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
.next
node_modules
.data
*.tsbuildinfo
coverage
playwright-report

79
mission-control/README.md Normal file
View File

@@ -0,0 +1,79 @@
# Mission Control (unified operator console)
Next.js application in this monorepo: **launchpad** links to existing UIs, **guided runbooks** collect inputs and execute **allowlisted** repo scripts with **live SSE trace**, **graded touchpoints**, **compliance assertions**, and a **downloadable ZIP audit pack** (manifest, events, logs, checksums).
## Run locally
From the **monorepo root**:
```bash
pnpm install
pnpm mission-control:dev
```
Open **http://localhost:3010** (Proxmox helper site can stay on 3000).
### Runbook catalog
- **Hand-written specs:** `mission-control/runbooks/specs/*.json` (short ids like `health-self-check`).
- **All documentation runbooks:** `mission-control/runbooks/doc-manifest.json` is generated from every `docs/**/**RUNBOOK**.md` (excluding master index files). Each entry runs **real** `scripts/...` or `explorer-monorepo/scripts/...` paths extracted from that markdown (up to 14 steps), with **Proxmox host**, **RPC override**, and **Practice mode** inputs.
Regenerate the doc manifest after editing runbook markdown:
```bash
pnpm --filter mission-control run generate:runbooks
```
`pnpm mission-control:build` runs the **prebuild** hook (`generate:runbooks`) automatically.
### Environment
| Variable | Purpose |
|----------|---------|
| `MISSION_CONTROL_PROJECT_ROOT` | Optional absolute monorepo root. If set but the path does not exist, Mission Control logs a warning and auto-detects from cwd instead (avoids a hard 500). |
| `GIT_BASH_PATH` | Windows: full path to `bash.exe` if not under default Git paths. |
| `NEXT_PUBLIC_HELPER_SCRIPTS_URL` | Launchpad link for helper site (default `http://localhost:3000`). |
| `NEXT_PUBLIC_EXPLORER_URL` | Launchpad link for explorer (default `https://explorer.d-bis.org`). |
| `NEXT_PUBLIC_PHOENIX_DEPLOY_API_URL` | Launchpad link for Phoenix Deploy API health (default `http://localhost:4001/health`). |
| `NEXT_PUBLIC_TESTNET_2138_RUNBOOK_URL` | Meta testnet 2138 runbook (Gitea markdown). |
| `NEXT_PUBLIC_ROUTE_MATRIX_URL` | Aggregator route matrix JSON in repo. |
| `NEXT_PUBLIC_DOCS_MASTER_URL` / `NEXT_PUBLIC_OPERATIONAL_RUNBOOKS_URL` | Override doc deep links on the launchpad. |
| `NEXT_PUBLIC_E2E_ENDPOINTS_DOC_URL` | Gitea markdown for `docs/04-configuration/E2E_ENDPOINTS_LIST.md` (routing verifier inventory). |
| `NEXT_PUBLIC_DOCS_SITE_URL` | Launchpad link for public docs site (default `https://docs.d-bis.org`). |
| `NEXT_PUBLIC_GITEA_URL` | Launchpad link for Gitea (default `https://gitea.d-bis.org`). |
| `NEXT_PUBLIC_CHAIN138_DAPP_URL` | Launchpad link for Chain 138 dapp (default `https://dapp.d-bis.org`). |
## Test
```bash
pnpm mission-control:test
```
Runs a real **health-self-check** (Node child process) against the allowlisted executor.
## Build / production
```bash
pnpm mission-control:build
pnpm mission-control:start
```
Use a **production process manager** (systemd, PM2, container) with `NODE_ENV=production`. The runner executes **only** scripts mapped in `src/lib/allowlist.ts`—no arbitrary shell from the UI.
## Security notes
- Treat this console as **privileged**: anyone who can POST `/api/runs` can trigger allowlisted automation on the host.
- Place **authentication / network restrictions** in front (reverse proxy, VPN, mTLS) for non-local use.
- Secrets in runbook forms: mark `sensitive: true` in JSON specs; values are redacted in `inputs.redacted.json` inside the audit bundle.
## Adding a runbook
**Option A — markdown in `docs/`:** Name the file with `RUNBOOK` in the filename. Reference scripts as `scripts/...` or `explorer-monorepo/scripts/...`. Run `pnpm --filter mission-control run generate:runbooks` and commit the updated `doc-manifest.json`.
**Option B — curated JSON:** Add `runbooks/specs/<id>.json` (see `src/lib/runbook-schema.ts`). Every spec must include an **`execution`** block with allowlisted script paths. Hand-written specs override doc-manifest entries if they share the same `id`.
Execution is allowlisted by path prefix only: **`scripts/`** and **`explorer-monorepo/scripts/`** (see `src/lib/execution-path-validator.ts`).
## Timeline
See [TIMELINE.md](./TIMELINE.md) for phased delivery and estimates.

View File

@@ -0,0 +1,17 @@
# Mission Control — delivery timeline
Estimates assume one engineer familiar with the monorepo. Parallel work (UI + runner hardening) can compress calendar time.
| Phase | Scope | Estimate | Status (this PR) |
|-------|--------|----------|-------------------|
| **P0** | Workspace package, routing, TARDIS-themed shell, launchpad links | 1–2 days | **Done** |
| **P1** | Runbook JSON schema, catalog UI, help tooltips, GO button, POST `/api/runs` | 2–3 days | **Done** |
| **P2** | Allowlisted executor (bash + node), job store, SSE stream, live panels | 3–4 days | **Done** |
| **P3** | Touchpoint grading, compliance assertions, audit ZIP + checksums | 2–3 days | **Done** |
| **P4** | Vitest smoke test, docs, env knobs for Windows/Git Bash | 1 day | **Done** |
| **P5** | AuthN/Z (OIDC/API key), rate limits, queue (Redis) for multi-instance | 1–2 weeks | *Future* |
| **P6** | Map remaining `docs/**` runbooks to specs + narrow allowlist expansion | Ongoing | *Future* |
**Total (P0–P4):** roughly **9–13** engineering days for a production-capable v1 on a **trusted network**.
**Wall-clock if focused:** about **2 weeks** including review, hardening, and operator dry-runs on LAN.

6
mission-control/next-env.d.ts vendored Normal file
View File

@@ -0,0 +1,6 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
/// <reference path="./.next/types/routes.d.ts" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.

View File

@@ -0,0 +1,7 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
reactStrictMode: true,
serverExternalPackages: ['archiver'],
};
export default nextConfig;

View File

@@ -0,0 +1,40 @@
{
"name": "mission-control",
"version": "1.0.0",
"private": true,
"description": "Unified console: launchpad, guided runbooks, live execution, compliance evidence, audit export",
"scripts": {
"generate:runbooks": "node ./scripts/generate-doc-runbook-manifest.mjs",
"prebuild": "pnpm run generate:runbooks",
"dev": "next dev -p 3010",
"build": "next build",
"start": "next start -p 3010",
"lint": "next lint",
"typecheck": "tsc --noEmit",
"test": "vitest run",
"test:watch": "vitest"
},
"dependencies": {
"archiver": "^7.0.1",
"clsx": "^2.1.1",
"lucide-react": "^0.561.0",
"next": "15.5.8",
"react": "19.2.3",
"react-dom": "19.2.3",
"tailwind-merge": "^3.4.1",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/archiver": "^7.0.0",
"@types/node": "^22.19.3",
"@types/react": "^19.2.7",
"@types/react-dom": "^19.2.3",
"autoprefixer": "^10.4.23",
"eslint": "^9.39.2",
"eslint-config-next": "15.5.8",
"postcss": "^8.5.6",
"tailwindcss": "^3.4.19",
"typescript": "^5.9.3",
"vitest": "^2.1.9"
}
}

View File

@@ -0,0 +1,9 @@
/** @type {import('postcss-load-config').Config} */
const config = {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
};
export default config;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,67 @@
{
"id": "check-chain138-rpc-health",
"title": "Chain 138 RPC health (HTTP + peers + public probe)",
"summary": "Runs the repository script that checks Besu HTTP RPCs in parallel (head spread, peer counts) and probes public RPC capability against the documented matrix.",
"whyItMatters": "Catches silent RPC drift, split heads, or under-peered nodes before wallets and bridges fail in production.",
"audienceHelp": "You need bash (WSL, Git Bash, or Linux/macOS) and network reachability to the RPC IPs in config/ip-addresses.conf. Tunables: RPC_MAX_HEAD_SPREAD, RPC_MIN_PEERS, RPC_TIMEOUT_SEC, CHAIN138_PUBLIC_RPC_URL.",
"docPath": "scripts/verify/check-chain138-rpc-health.sh",
"prerequisites": [
"Bash and curl available on PATH.",
"Optional: config/ip-addresses.conf present for LAN IP overrides."
],
"steps": [
{
"title": "Parallel RPC checks",
"plainText": "The script queries each configured HTTP endpoint for block number and peer count, then compares head spread and minimum peers.",
"technicalNote": "See scripts/verify/check-chain138-rpc-health.sh header for env defaults."
},
{
"title": "Public capability probe",
"plainText": "Validates the public RPC URL against the documented support matrix (methods / capabilities).",
"technicalNote": "CHAIN138_PUBLIC_RPC_URL overrides https://rpc-http-pub.d-bis.org"
}
],
"inputs": [
{
"name": "proxmoxHost",
"label": "Proxmox host",
"type": "string",
"help": "Passed as PROXMOX_HOST for any downstream tooling that reads it (this script primarily uses RPC IPs).",
"example": "192.168.11.10",
"default": "192.168.11.10"
},
{
"name": "rpcUrlOverride",
"label": "RPC URL override (optional)",
"type": "string",
"help": "If non-empty, set as RPC_URL_138 in the environment for consistency with other runbooks.",
"example": "http://192.168.11.211:8545",
"default": ""
},
{
"name": "practiceMode",
"label": "Practice mode (--dry-run where supported)",
"type": "boolean",
"help": "This script does not implement --dry-run; leave off for a real check.",
"default": false
}
],
"touchpoints": [
{
"id": "exit_ok",
"label": "Script exit",
"description": "check-chain138-rpc-health.sh exited 0.",
"passCondition": "exit_zero"
}
],
"complianceFramework": "DBIS-MC-CHAIN138-RPC-1",
"execution": {
"steps": [
{
"interpreter": "bash",
"scriptRelative": "scripts/verify/check-chain138-rpc-health.sh",
"args": []
}
]
}
}

View File

@@ -0,0 +1,43 @@
{
"id": "health-self-check",
"title": "Mission Control pipeline check",
"summary": "Runs a tiny built-in command to prove the console can start processes and record results.",
"whyItMatters": "If this fails, the problem is the console or Node on this machine—not your network or Proxmox.",
"audienceHelp": "You do not need to know what Node is. Press the button; green means the control room is working.",
"docPath": "mission-control/README.md",
"prerequisites": ["You are on the machine where Mission Control is installed."],
"steps": [
{
"title": "What happens",
"plainText": "The system runs one safe line of code that prints a short success message. Nothing on your network is changed.",
"technicalNote": "Executes scripts/mission-control/health-check.mjs",
"example": "Output line: MISSION_CONTROL_HEALTH_OK"
}
],
"inputs": [],
"touchpoints": [
{
"id": "stdout_marker",
"label": "Success marker in output",
"description": "Proves stdout was captured.",
"passCondition": "stdout_contains",
"pattern": "MISSION_CONTROL_HEALTH_OK"
},
{
"id": "clean_exit",
"label": "Process exit",
"description": "Proves the child process ended without error.",
"passCondition": "exit_zero"
}
],
"complianceFramework": "DBIS-MC-INTERNAL-1",
"execution": {
"steps": [
{
"interpreter": "node",
"scriptRelative": "scripts/mission-control/health-check.mjs",
"args": []
}
]
}
}

View File

@@ -0,0 +1,42 @@
{
"id": "reconcile-env-canonical",
"title": "Print canonical Chain 138 environment lines",
"summary": "Emits the recommended contract address lines for smom-dbis-138/.env from the documentation source of truth.",
"whyItMatters": "Keeps deploy and tooling aligned with the same addresses your docs say are canonical—without opening large markdown files by hand.",
"audienceHelp": "You are not editing secrets here. The script only prints suggested lines; you copy them into your env file if your operator approves.",
"docPath": "scripts/verify/reconcile-env-canonical.sh",
"prerequisites": ["Bash available.", "docs/11-references/CONTRACT_ADDRESSES_REFERENCE.md exists in the repo."],
"steps": [
{
"title": "Review output",
"plainText": "The console will show lines like COMPLIANT_USDT=0x… Compare them to your smom-dbis-138/.env with your team lead.",
"technicalNote": "Runs reconcile-env-canonical.sh --print"
}
],
"inputs": [],
"touchpoints": [
{
"id": "canonical_marker",
"label": "Canonical lines emitted",
"description": "Output includes known canonical variable names.",
"passCondition": "stdout_contains",
"pattern": "COMPLIANCE_REGISTRY="
},
{
"id": "exit_ok",
"label": "Script exit",
"description": "Script finished successfully.",
"passCondition": "exit_zero"
}
],
"complianceFramework": "DBIS-MC-INTERNAL-1",
"execution": {
"steps": [
{
"interpreter": "bash",
"scriptRelative": "scripts/verify/reconcile-env-canonical.sh",
"args": ["--print"]
}
]
}
}

View File

@@ -0,0 +1,59 @@
{
"id": "run-completable-anywhere",
"title": "Run “completable from anywhere” validation suite",
"summary": "Runs config validation, optional on-chain checks, full validation (genesis skipped), and env reconciliation printout.",
"whyItMatters": "This is the same high-level health pass documented for machines that are not on the operator LAN.",
"audienceHelp": "Start with Practice mode. A full run can take several minutes and may try to reach Chain 138 RPC if your network allows it.",
"docPath": "scripts/run-completable-tasks-from-anywhere.sh",
"prerequisites": ["Bash available.", "Network access optional for some steps."],
"steps": [
{
"title": "Practice mode",
"plainText": "Lists the four steps without executing them.",
"example": "You should see “Completable from anywhere (--dry-run”"
},
{
"title": "Full run",
"plainText": "Executes all four steps. Some steps tolerate RPC failure; read the live log if anything is yellow or red.",
"technicalNote": "See MASTER_INDEX.md “completable from anywhere”"
}
],
"inputs": [
{
"name": "dryRun",
"label": "Practice mode (dry run)",
"type": "boolean",
"help": "Safe preview of what would run.",
"default": true
}
],
"touchpoints": [
{
"id": "done_banner",
"label": "Completion signal",
"description": "Detects section headers printed in both dry-run and full execution.",
"passCondition": "stdout_contains",
"pattern": "==="
},
{
"id": "exit_ok",
"label": "Exit code",
"description": "Process exited zero.",
"passCondition": "exit_zero"
}
],
"complianceFramework": "DBIS-MC-INTERNAL-1",
"execution": {
"steps": [
{
"interpreter": "bash",
"scriptRelative": "scripts/run-completable-tasks-from-anywhere.sh",
"args": [],
"supportsDryRun": true,
"whenInputTrue": {
"dryRun": ["--dry-run"]
}
}
]
}
}

View File

@@ -0,0 +1,63 @@
{
"id": "validate-config-files",
"title": "Validate repository configuration files",
"summary": "Checks that key config files (IPs, token lists, mappings) exist and look structurally valid.",
"whyItMatters": "Broken or missing config causes silent failures later when you deploy or run operator scripts.",
"audienceHelp": "Use Practice mode first—it only shows what would be checked. Turn it off when you want a real check.",
"docPath": "scripts/validation/validate-config-files.sh",
"prerequisites": [
"Bash available (macOS/Linux, WSL, or Git for Windows).",
"Repository root is the monorepo (contains config/ and pnpm-workspace.yaml)."
],
"steps": [
{
"title": "Practice mode (recommended first)",
"plainText": "When Practice mode is on, the script lists what it would validate and exits successfully without touching files.",
"example": "You will see lines starting with === Validation (--dry-run"
},
{
"title": "Full check",
"plainText": "Turn Practice mode off to scan the repo. jq may be used if installed for JSON validation.",
"technicalNote": "Script: scripts/validation/validate-config-files.sh"
}
],
"inputs": [
{
"name": "dryRun",
"label": "Practice mode (dry run)",
"type": "boolean",
"help": "When enabled, no real file checks run—only a safe preview.",
"example": "Start with this ON, then run again with it OFF.",
"default": true
}
],
"touchpoints": [
{
"id": "exit_ok",
"label": "Script completed without crash",
"passCondition": "exit_zero",
"description": "Non-zero exit means validation reported errors."
},
{
"id": "signal_ok",
"label": "Expected log signal",
"description": "Detects either dry-run banner or success line.",
"passCondition": "stdout_contains",
"pattern": "Validation"
}
],
"complianceFramework": "DBIS-MC-INTERNAL-1",
"execution": {
"steps": [
{
"interpreter": "bash",
"scriptRelative": "scripts/validation/validate-config-files.sh",
"args": [],
"supportsDryRun": true,
"whenInputTrue": {
"dryRun": ["--dry-run"]
}
}
]
}
}

View File

@@ -0,0 +1,35 @@
{
"id": "verify-ws-rpc-chain138",
"title": "Verify WebSocket RPC (Chain 138)",
"summary": "Runs the repository script that checks WebSocket connectivity to the configured Chain 138 RPC endpoint.",
"whyItMatters": "Wallets and some services use WebSockets; HTTP-only checks are not enough.",
"audienceHelp": "You need network reachability to the RPC URL in your environment. If this fails, ask whether you are on the correct network or VPN.",
"docPath": "scripts/verify-ws-rpc-chain138.mjs",
"prerequisites": ["Node.js on PATH.", "RPC/WebSocket URL reachable from this machine (see root package.json verify:ws-chain138)."],
"steps": [
{
"title": "Run check",
"plainText": "The script prints connection results. Green in the live log usually means the socket answered.",
"technicalNote": "pnpm verify:ws-chain138 from repo root is equivalent."
}
],
"inputs": [],
"touchpoints": [
{
"id": "exit_ok",
"label": "Script exit",
"description": "verify-ws-rpc-chain138.mjs exited 0.",
"passCondition": "exit_zero"
}
],
"complianceFramework": "DBIS-MC-INTERNAL-1",
"execution": {
"steps": [
{
"interpreter": "node",
"scriptRelative": "scripts/verify-ws-rpc-chain138.mjs",
"args": []
}
]
}
}

View File

@@ -0,0 +1,238 @@
/**
 * Scans docs/ for markdown files whose names contain RUNBOOK and writes
 * mission-control/runbooks/doc-manifest.json, turning each document into a
 * runbook spec whose executable steps are the scripts/... paths referenced
 * in that markdown.
 *
 * Invoked as `node ./scripts/generate-doc-runbook-manifest.mjs` (see the
 * package.json "generate:runbooks" script). Output is deterministic: files
 * are processed in sorted order and duplicate ids get a stable numeric
 * suffix, so regenerating with unchanged docs yields an identical manifest
 * (no spurious diffs in the committed file).
 */
import crypto from 'node:crypto';
import fs from 'node:fs';
import path from 'node:path';
import { fileURLToPath, pathToFileURL } from 'node:url';

const __dirname = path.dirname(fileURLToPath(import.meta.url));
const MC_ROOT = path.resolve(__dirname, '..');
const REPO_ROOT = path.resolve(MC_ROOT, '..');
const OUT = path.join(MC_ROOT, 'runbooks', 'doc-manifest.json');

// Master/index documents list other runbooks; generating entries for them
// would duplicate every linked procedure.
const EXCLUDE_NAMES = new Set([
  'RUNBOOKS_MASTER_INDEX.md',
  'OPERATIONAL_RUNBOOKS.md',
  'TEZOS_CCIP_RUNBOOKS_INDEX.md',
  'OMNL_OFFICE_MASTER_RUNBOOK_INDEX.md',
]);

// Matches scripts/... or explorer-monorepo/scripts/... paths (.sh or .mjs)
// embedded in prose, quotes, backticks, or parentheses.
const SCRIPT_RE =
  /(?:^|[\s"'`(])\.?\/?((?:scripts|explorer-monorepo\/scripts)\/[a-zA-Z0-9_.\/-]+\.(?:sh|mjs))/g;
const MAX_STEPS = 14;
const FALLBACK_SCRIPT = 'scripts/validation/validate-config-files.sh';

/** Paths meant to be sourced (running them as a step is misleading). */
const SKIP_SCRIPT_PATHS = new Set([
  'scripts/lib/load-project-env.sh',
  'scripts/lib/load-contract-addresses.sh',
]);

// Inputs attached to every doc-derived runbook (host / RPC override / dry-run).
const STANDARD_INPUTS = [
  {
    name: 'proxmoxHost',
    label: 'Proxmox host',
    type: 'string',
    help: 'Used as PROXMOX_HOST in the environment for scripts that read it (e.g. 192.168.11.10).',
    example: '192.168.11.10',
    default: '192.168.11.10',
  },
  {
    name: 'rpcUrlOverride',
    label: 'RPC URL override (optional)',
    type: 'string',
    help: 'If non-empty, set as RPC_URL_138 for scripts that use Chain 138 RPC.',
    example: 'http://192.168.11.211:8545',
    default: '',
  },
  {
    name: 'practiceMode',
    label: 'Practice mode (--dry-run where supported)',
    type: 'boolean',
    help: 'When enabled, each step whose script advertises --dry-run receives that flag.',
    default: false,
  },
];

/**
 * Recursively collects absolute paths of *.md files under `dir` whose
 * filename contains RUNBOOK (case-insensitive) and is not excluded.
 * Returns `acc`; a missing directory yields an empty result.
 */
function walkDocs(dir, acc = []) {
  if (!fs.existsSync(dir)) return acc;
  for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
    const p = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      walkDocs(p, acc);
    } else {
      const upper = entry.name.toUpperCase();
      if (!upper.includes('RUNBOOK') || !entry.name.toLowerCase().endsWith('.md')) continue;
      if (EXCLUDE_NAMES.has(entry.name)) continue;
      acc.push(p);
    }
  }
  return acc;
}

/** Repo-relative path with forward slashes (stable across platforms). */
function relFromRepo(abs) {
  return path.relative(REPO_ROOT, abs).split(path.sep).join('/');
}

/**
 * Deterministic id: slugified path (capped at 120 chars) plus an 8-hex-char
 * sha256 of the relative path, so renames change the id and distinct docs
 * essentially never collide.
 */
function makeId(rel) {
  const slug = rel
    .replace(/^docs[/\\]/, '')
    .replace(/\.md$/i, '')
    .split(/[/\\]/)
    .join('-')
    .replace(/[^a-zA-Z0-9-]+/g, '-')
    .toLowerCase()
    .replace(/^-+|-+$/g, ''); // strip runs of leading/trailing hyphens
  const base = `doc-${slug}`.slice(0, 120);
  const h = crypto.createHash('sha256').update(rel).digest('hex').slice(0, 8);
  return `${base}-${h}`;
}

/** First level-1 heading, or 'Runbook' when the doc has none. */
function extractTitle(content) {
  const m = content.match(/^#\s+(.+)$/m);
  return m ? m[1].trim() : 'Runbook';
}

/** First non-heading, non-fence prose line (trimmed, max 400 chars). */
function extractSummary(content) {
  for (const line of content.split('\n')) {
    const t = line.trim();
    if (!t || t.startsWith('#')) continue;
    if (t.startsWith('```')) continue;
    return t.slice(0, 400);
  }
  return 'Operational procedure from repository documentation.';
}

/**
 * Normalizes a matched script path: strips a leading './', rejects absolute
 * paths and anything containing '..' (no escaping the repo root).
 */
function normalizeScript(raw) {
  const s = raw.replace(/^\.\//, '');
  if (s.startsWith('/')) return null;
  if (s.includes('..')) return null;
  return s;
}

/**
 * Extracts up to MAX_STEPS unique, existing, non-skipped script paths from
 * the markdown, in first-mention order.
 */
function extractScripts(content) {
  const seen = new Set();
  const ordered = [];
  // Fresh RegExp so lastIndex state never leaks between documents.
  const re = new RegExp(SCRIPT_RE.source, 'g');
  let m;
  while ((m = re.exec(content)) !== null) {
    const n = normalizeScript(m[1]);
    if (!n || seen.has(n) || SKIP_SCRIPT_PATHS.has(n)) continue;
    if (!fs.existsSync(path.join(REPO_ROOT, n))) continue;
    seen.add(n);
    ordered.push(n);
    if (ordered.length >= MAX_STEPS) break;
  }
  return ordered;
}

/**
 * Heuristic: does the script's header (first 12 kB) mention --dry-run?
 * Unreadable scripts are treated as not supporting it.
 */
function scriptSupportsDryRun(scriptRel) {
  try {
    const abs = path.join(REPO_ROOT, scriptRel);
    const chunk = fs.readFileSync(abs, 'utf8').slice(0, 12000);
    return /--dry-run\b/.test(chunk);
  } catch {
    return false;
  }
}

/**
 * Builds one runbook spec from a markdown file: title/summary from the doc,
 * execution steps from referenced scripts (falling back to config validation
 * when the doc references none).
 */
function buildEntry(absPath) {
  const rel = relFromRepo(absPath);
  const content = fs.readFileSync(absPath, 'utf8');
  const scripts = extractScripts(content);

  let usedFallback = false;
  let steps = scripts.map((scriptRelative) => ({
    interpreter: scriptRelative.endsWith('.mjs') ? 'node' : 'bash',
    scriptRelative,
    args: [],
    supportsDryRun: scriptRelative.endsWith('.sh') && scriptSupportsDryRun(scriptRelative),
  }));
  if (steps.length === 0) {
    usedFallback = true;
    if (fs.existsSync(path.join(REPO_ROOT, FALLBACK_SCRIPT))) {
      steps = [
        {
          interpreter: 'bash',
          scriptRelative: FALLBACK_SCRIPT,
          args: [],
          supportsDryRun: scriptSupportsDryRun(FALLBACK_SCRIPT),
        },
      ];
    }
  }

  const why = usedFallback
    ? 'No shell/Node script paths were detected in this markdown. Mission Control runs repository config validation so you still get an automated check; follow the documentation for the full manual procedure.'
    : 'Automated steps are the scripts explicitly referenced in this runbook. Review the documentation for prerequisites (SSH, VPN, secrets) before running in production.';

  return {
    id: makeId(rel),
    title: extractTitle(content),
    summary: extractSummary(content),
    whyItMatters:
      'This links documentation to executable automation in the monorepo. Operators get repeatable runs and an audit trail.',
    audienceHelp:
      'Use Practice mode when a script supports it. Set Proxmox host and RPC override when your environment differs from defaults.',
    docPath: rel,
    prerequisites: [
      'Read the linked markdown runbook for safety and ordering.',
      'Bash (Linux, macOS, WSL, or Git Bash on Windows) for .sh steps; Node for .mjs.',
      'Network, SSH, or API access as required by the underlying scripts.',
    ],
    steps: [
      {
        title: 'Documentation',
        plainText: `Open and follow: ${rel}`,
        technicalNote: 'Automated steps below are derived from script paths mentioned in that file.',
      },
    ],
    inputs: STANDARD_INPUTS,
    execution: { steps },
    touchpoints: [
      {
        id: 'pipeline_exit',
        label: 'All automated steps completed',
        description: 'Aggregate exit status of the script chain.',
        passCondition: 'exit_zero',
      },
    ],
    complianceFramework: 'DBIS-MC-DOC-RUNBOOK-1',
    executionNote: why,
  };
}

/** Generates the manifest and writes it to OUT. */
function main() {
  const docsRoot = path.join(REPO_ROOT, 'docs');
  const files = walkDocs(docsRoot);
  // Sort by repo-relative path so manifest ordering is stable.
  files.sort((a, b) => relFromRepo(a).localeCompare(relFromRepo(b)));

  const entries = [];
  const ids = new Set();
  for (const f of files) {
    const spec = buildEntry(f);
    // Deterministic de-dupe: a stable increasing suffix instead of random
    // bytes, so regeneration never churns ids in the committed manifest.
    if (ids.has(spec.id)) {
      const base = spec.id;
      let n = 2;
      while (ids.has(`${base}-x${n}`)) n += 1;
      spec.id = `${base}-x${n}`;
    }
    ids.add(spec.id);
    entries.push(spec);
  }

  fs.mkdirSync(path.dirname(OUT), { recursive: true });
  fs.writeFileSync(
    OUT,
    JSON.stringify({ generatedAt: new Date().toISOString(), runbooks: entries }, null, 2),
    'utf8',
  );
  // stderr keeps stdout clean for any tooling that pipes this script.
  console.error(`Wrote ${entries.length} doc-derived runbooks to ${path.relative(REPO_ROOT, OUT)}`);
}

// Run only when executed directly (node ./scripts/...), so the helpers can
// be imported in tests without side effects.
if (process.argv[1] && import.meta.url === pathToFileURL(process.argv[1]).href) {
  main();
}

View File

@@ -0,0 +1,17 @@
import { NextResponse } from 'next/server';
import { loadRunbookSpec } from '@/lib/load-specs';

// Resolve specs at request time — they may be regenerated on disk.
export const dynamic = 'force-dynamic';
export const runtime = 'nodejs';

/**
 * GET /api/runbooks/[id] — return a single runbook spec by id.
 * Responds 404 when no spec with that id exists.
 */
export async function GET(
  _req: Request,
  ctx: { params: Promise<{ id: string }> },
) {
  const { id } = await ctx.params;
  const spec = loadRunbookSpec(id);
  return spec
    ? NextResponse.json(spec)
    : NextResponse.json({ error: 'Runbook not found' }, { status: 404 });
}

View File

@@ -0,0 +1,15 @@
import { NextResponse } from 'next/server';
import { loadAllRunbookSpecs } from '@/lib/load-specs';
export const dynamic = 'force-dynamic';
export const runtime = 'nodejs';

/**
 * GET /api/runbooks — returns the full runbook catalog, or a 500 carrying
 * the loader's error message when the manifest cannot be read.
 */
export async function GET() {
  try {
    return NextResponse.json({ runbooks: loadAllRunbookSpecs() });
  } catch (e) {
    const msg = e instanceof Error ? e.message : String(e);
    return NextResponse.json({ error: msg }, { status: 500 });
  }
}

View File

@@ -0,0 +1,27 @@
import { buildAuditZipBuffer } from '@/lib/audit-zip';
export const dynamic = 'force-dynamic';
export const runtime = 'nodejs';

/**
 * GET /api/runs/[id]/audit — streams the audit-pack ZIP for a run.
 * 404 when the run is unknown, 500 for any other bundling failure.
 */
export async function GET(
  _req: Request,
  ctx: { params: Promise<{ id: string }> },
) {
  const { id } = await ctx.params;
  try {
    const buf = await buildAuditZipBuffer(id);
    // Uint8Array wrapper: a portable BodyInit for the web Response type.
    return new Response(new Uint8Array(buf), {
      headers: {
        'Content-Type': 'application/zip',
        'Content-Disposition': `attachment; filename="mission-control-audit-${id}.zip"`,
        // Evidence bundles must always be fetched fresh — never cache.
        'Cache-Control': 'no-store',
      },
    });
  } catch (e) {
    const msg = e instanceof Error ? e.message : String(e);
    // NOTE(review): 404-vs-500 is decided by matching the error text
    // ('not found') — fragile if buildAuditZipBuffer's messages change.
    return new Response(JSON.stringify({ error: msg }), {
      status: msg.includes('not found') ? 404 : 500,
      headers: { 'Content-Type': 'application/json' },
    });
  }
}

View File

@@ -0,0 +1,19 @@
import { NextResponse } from 'next/server';
import { getJobStore } from '@/lib/job-store';
export const dynamic = 'force-dynamic';
export const runtime = 'nodejs';

/**
 * GET /api/runs/[id] — returns stored run metadata plus the full event
 * list; 404 when no metadata exists for the id.
 */
export async function GET(
  _req: Request,
  ctx: { params: Promise<{ id: string }> },
) {
  const { id } = await ctx.params;
  const store = getJobStore();
  const meta = store.readMeta(id);
  if (meta) {
    return NextResponse.json({ meta, events: store.readEvents(id) });
  }
  return NextResponse.json({ error: 'Run not found' }, { status: 404 });
}

View File

@@ -0,0 +1,70 @@
import { getJobStore } from '@/lib/job-store';
import type { RunEvent } from '@/lib/run-events';
export const dynamic = 'force-dynamic';
export const runtime = 'nodejs';

/**
 * GET /api/runs/[id]/stream — Server-Sent Events feed for one run.
 *
 * Order of operations:
 *  1. 404 if the run is unknown.
 *  2. Replay every event already persisted for the run.
 *  3. Subscribe to the run's in-process event bus for live events.
 *  4. Poll meta every 400 ms; on a terminal status, close 250 ms later so
 *     trailing bus events still flush.
 */
export async function GET(
  req: Request,
  ctx: { params: Promise<{ id: string }> },
) {
  const { id } = await ctx.params;
  const store = getJobStore();
  if (!store.readMeta(id)) {
    return new Response(JSON.stringify({ error: 'Run not found' }), {
      status: 404,
      headers: { 'Content-Type': 'application/json' },
    });
  }
  const encoder = new TextEncoder();
  const bus = store.getRunBus(id);
  const stream = new ReadableStream({
    start(controller) {
      // SSE framing: one `data:` line plus a blank line per event.
      const send = (ev: RunEvent) => {
        controller.enqueue(encoder.encode(`data: ${JSON.stringify(ev)}\n\n`));
      };
      // Replay persisted history first so late subscribers see the full
      // timeline. NOTE(review): bus events arriving during this replay may
      // duplicate replayed ones — the client de-duplicates by value.
      for (const ev of store.readEvents(id)) {
        send(ev);
      }
      const onEv = (ev: unknown) => send(ev as RunEvent);
      bus.on('event', onEv);
      let poll: ReturnType<typeof setInterval>;
      // Idempotent teardown: stop polling, detach from the bus, close stream.
      const close = () => {
        clearInterval(poll);
        bus.off('event', onEv);
        try {
          controller.close();
        } catch {
          /* closed */
        }
      };
      // No push signal exists for run completion, so poll the stored meta
      // and close shortly after a terminal status.
      poll = setInterval(() => {
        const meta = store.readMeta(id);
        if (
          meta?.status === 'succeeded' ||
          meta?.status === 'failed' ||
          meta?.status === 'error'
        ) {
          clearInterval(poll);
          setTimeout(close, 250);
        }
      }, 400);
      // Client disconnect tears everything down immediately.
      req.signal.addEventListener('abort', close);
    },
  });
  return new Response(stream, {
    headers: {
      'Content-Type': 'text/event-stream',
      // no-transform keeps proxies from buffering/compressing the stream.
      'Cache-Control': 'no-store, no-transform',
      Connection: 'keep-alive',
    },
  });
}

View File

@@ -0,0 +1,43 @@
import { NextResponse } from 'next/server';
import { z, ZodError } from 'zod';
import { queueRun } from '@/lib/executor';
import { loadRunbookSpec } from '@/lib/load-specs';
import { coerceRunbookInputs } from '@/lib/coerce-inputs';
export const dynamic = 'force-dynamic';
export const runtime = 'nodejs';

/** Request body: which runbook to run plus free-form input values. */
const postBodySchema = z.object({
  runbookId: z.string().min(1),
  inputs: z.record(z.string(), z.unknown()).optional().default({}),
});

/**
 * POST /api/runs — validates the body, resolves the runbook, coerces inputs
 * to their declared field types, queues the run, and returns the run id plus
 * the stream/audit/meta URLs for it.
 *
 * Responses: 400 invalid JSON or schema violation · 404 unknown runbook ·
 * 500 queueing or other unexpected failure.
 */
export async function POST(req: Request) {
  try {
    let json: unknown;
    try {
      // Malformed JSON is a client error, reported before schema validation.
      json = await req.json();
    } catch {
      return NextResponse.json({ error: 'Invalid JSON body' }, { status: 400 });
    }
    const body = postBodySchema.parse(json);
    const spec = loadRunbookSpec(body.runbookId);
    if (!spec) {
      return NextResponse.json({ error: 'Unknown runbook' }, { status: 404 });
    }
    // Coerce string form values (e.g. 'true', '42') to declared field types.
    const inputs = coerceRunbookInputs(spec, body.inputs);
    const { runId } = queueRun(body.runbookId, inputs);
    return NextResponse.json({
      runId,
      streamUrl: `/api/runs/${runId}/stream`,
      auditUrl: `/api/runs/${runId}/audit`,
      metaUrl: `/api/runs/${runId}`,
    });
  } catch (e) {
    // ZodError → 400 with structured issues; anything else → 500.
    if (e instanceof ZodError) {
      return NextResponse.json({ error: 'Invalid request', issues: e.issues }, { status: 400 });
    }
    const msg = e instanceof Error ? e.message : String(e);
    return NextResponse.json({ error: msg }, { status: 500 });
  }
}

View File

@@ -0,0 +1,29 @@
/* Tailwind layers: base reset, component classes, utility classes. */
@tailwind base;
@tailwind components;
@tailwind utilities;

/* Accent color as raw RGB channels (usable via rgb(var(--tardis-glow))). */
:root {
  --tardis-glow: 0 180 216;
}

/* Full-height gradient backdrop; fixed so it does not scroll with content. */
body {
  @apply min-h-screen bg-gradient-to-b from-tardis-deep via-tardis-panel to-tardis-deep text-tardis-paper antialiased;
  background-attachment: fixed;
}

/* Subtle “police box” corner accents */
/* Panel chrome: translucent card with hairline ring + inner top highlight. */
.mc-panel {
  @apply relative rounded-xl border border-tardis-glow/25 bg-tardis-panel/40 shadow-panel backdrop-blur-md;
  box-shadow:
    0 0 0 1px rgba(0, 180, 216, 0.12),
    inset 0 1px 0 rgba(255, 255, 255, 0.05);
}

/* Decorative glow line along the top edge of each panel. */
.mc-panel::before {
  content: '';
  @apply pointer-events-none absolute inset-x-3 top-0 h-px bg-gradient-to-r from-transparent via-tardis-glow/60 to-transparent;
}

/* Soft cyan halo for headline text. */
.mc-glow-text {
  text-shadow: 0 0 12px rgba(0, 180, 216, 0.45);
}

View File

@@ -0,0 +1,20 @@
import type { Metadata } from 'next';
import './globals.css';
/** Site-wide <head> metadata injected by Next.js. */
export const metadata: Metadata = {
  title: 'Mission Control | DBIS Operator Console',
  description:
    'Unified console: launchpad, guided runbooks, live execution trace, compliance evidence, audit export.',
};

/**
 * Root layout: wraps every page, pulls in globals.css, and applies the
 * display font class to the document body.
 */
export default function RootLayout({
  children,
}: Readonly<{
  children: React.ReactNode;
}>) {
  return (
    <html lang="en">
      <body className="font-display">{children}</body>
    </html>
  );
}

View File

@@ -0,0 +1,62 @@
import Link from 'next/link';
import { ExternalLink, Rocket } from 'lucide-react';
import { getLaunchDestinations } from '@/lib/launchpad';
/**
 * Landing page: hero header linking into the guided runbooks, plus a
 * launchpad grid of external destinations from getLaunchDestinations().
 */
export default function HomePage() {
  const destinations = getLaunchDestinations();
  return (
    <main className="mx-auto max-w-5xl px-4 py-12">
      <header className="mb-12 text-center">
        <p className="mb-2 text-sm uppercase tracking-[0.35em] text-tardis-glow/80">
          Temporal operations
        </p>
        <h1 className="mc-glow-text text-4xl font-bold text-tardis-paper md:text-5xl">
          Mission Control
        </h1>
        <p className="mx-auto mt-4 max-w-2xl text-lg text-tardis-paper/75">
          {/* Fixed missing separator: previously rendered as "nativeswith". */}
          A calm console for people who are not infra natives — with receipts for auditors who are.
        </p>
        <div className="mt-8 flex flex-wrap justify-center gap-4">
          <Link
            href="/runbooks"
            className="inline-flex items-center gap-2 rounded-lg bg-tardis-bright px-6 py-3 font-semibold text-white shadow-tardis transition hover:bg-tardis-glow"
          >
            <Rocket className="h-5 w-5" aria-hidden />
            Guided runbooks
          </Link>
        </div>
      </header>
      <section className="mc-panel p-6 md:p-8">
        <h2 className="text-xl font-semibold text-tardis-glow">Launchpad</h2>
        <p className="mt-2 text-sm text-tardis-paper/70">
          Jump to tools that already exist. Start the helper site separately if you use the default port.
        </p>
        {/* One card per destination; links open in a new tab. */}
        <ul className="mt-6 grid gap-4 md:grid-cols-2">
          {destinations.map((d) => (
            <li key={d.id}>
              <a
                href={d.href}
                target="_blank"
                rel="noopener noreferrer"
                className="group flex h-full flex-col rounded-lg border border-white/10 bg-black/20 p-4 transition hover:border-tardis-glow/40 hover:bg-black/30"
              >
                <span className="flex items-start justify-between gap-2">
                  <span className="font-semibold text-tardis-paper">{d.title}</span>
                  <ExternalLink className="h-4 w-4 shrink-0 text-tardis-glow opacity-70 group-hover:opacity-100" />
                </span>
                <span className="mt-2 text-sm text-tardis-paper/65">{d.description}</span>
                <span className="mt-3 font-mono text-xs text-tardis-amber/90">{d.href}</span>
              </a>
            </li>
          ))}
        </ul>
      </section>
      <footer className="mt-12 text-center text-xs text-tardis-paper/45">
        Operator console · evidence-first · no silent magic
      </footer>
    </main>
  );
}

View File

@@ -0,0 +1,186 @@
'use client';
import { useRouter } from 'next/navigation';
import { useCallback, useState } from 'react';
import { FileText } from 'lucide-react';
import type { RunbookSpec } from '@/lib/runbook-schema';
import { HelpTip } from '@/components/HelpTip';
import { GoButton } from '@/components/GoButton';
import { cn } from '@/lib/cn';
/** Props for the interactive runner: the (plain-JSON) runbook spec to drive. */
type Props = {
  spec: RunbookSpec;
};

/**
 * Client-side runbook page body: explains the procedure in plain language,
 * collects the spec's declared inputs, and on "GO" POSTs to /api/runs, then
 * navigates to the live-run page for the returned run id.
 */
export function RunbookRunner({ spec }: Props) {
  const router = useRouter();
  // Input values keyed by field name, seeded with each field's default.
  const [inputs, setInputs] = useState<Record<string, unknown>>(() => {
    const o: Record<string, unknown> = {};
    for (const f of spec.inputs) {
      if (f.default !== undefined) o[f.name] = f.default;
    }
    return o;
  });
  const [loading, setLoading] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const docHref = `/api/runbooks/${spec.id}`;
  // Queue the run; on success navigate to the live-stream page for it.
  const onRun = useCallback(async () => {
    setError(null);
    setLoading(true);
    try {
      const res = await fetch('/api/runs', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ runbookId: spec.id, inputs }),
      });
      const data = (await res.json()) as { runId?: string; error?: string };
      if (!res.ok) {
        throw new Error(data.error ?? res.statusText);
      }
      if (!data.runId) throw new Error('No run id returned');
      router.push(`/runbooks/${spec.id}/run/${data.runId}`);
    } catch (e) {
      setError(e instanceof Error ? e.message : String(e));
    } finally {
      setLoading(false);
    }
  }, [inputs, router, spec.id]);
  return (
    <div className="mt-6 space-y-8">
      {/* Title, rationale, automation note, and doc/spec references. */}
      <header>
        <h1 className="text-2xl font-bold text-tardis-paper">{spec.title}</h1>
        <p className="mt-2 text-tardis-paper/75">{spec.summary}</p>
        <div className="mt-4 rounded-lg border border-tardis-amber/30 bg-black/25 p-4 text-sm text-tardis-paper/85">
          <p className="font-semibold text-tardis-amber">Why this matters</p>
          <p className="mt-1">{spec.whyItMatters}</p>
        </div>
        {spec.executionNote ? (
          <div className="mt-3 rounded-lg border border-tardis-glow/25 bg-tardis-panel/30 p-3 text-sm text-tardis-paper/80">
            <p className="font-semibold text-tardis-glow">Automation note</p>
            <p className="mt-1">{spec.executionNote}</p>
          </div>
        ) : null}
        <p className="mt-3 flex items-start gap-2 text-sm text-tardis-paper/70">
          <FileText className="mt-0.5 h-4 w-4 shrink-0 text-tardis-glow" aria-hidden />
          <span>
            Reference in repo: <code className="text-tardis-glow">{spec.docPath}</code> ·{' '}
            <a className="text-tardis-glow underline" href={docHref}>
              Machine-readable spec (JSON)
            </a>
          </span>
        </p>
      </header>
      {/* Prerequisites from the spec. */}
      <section className="mc-panel p-5">
        <h2 className="flex flex-wrap items-center gap-2 text-lg font-semibold text-tardis-glow">
          Before you start
          <HelpTip title="Plain-language note" body={spec.audienceHelp} />
        </h2>
        <ul className="mt-3 list-inside list-disc text-sm text-tardis-paper/80">
          {spec.prerequisites.map((p) => (
            <li key={p}>{p}</li>
          ))}
        </ul>
      </section>
      {/* Numbered plain-language steps with optional technical notes/examples. */}
      <section className="mc-panel p-5">
        <h2 className="text-lg font-semibold text-tardis-glow">What will happen</h2>
        <ol className="mt-4 space-y-4">
          {spec.steps.map((step, i) => (
            <li key={step.title} className="flex gap-3 text-sm">
              <span className="flex h-7 w-7 shrink-0 items-center justify-center rounded-full bg-tardis-bright/40 text-xs font-bold text-white">
                {i + 1}
              </span>
              <div>
                <p className="font-semibold text-tardis-paper">{step.title}</p>
                <p className="mt-1 text-tardis-paper/75">{step.plainText}</p>
                {step.technicalNote ? (
                  <p className="mt-1 font-mono text-xs text-tardis-glow/80">{step.technicalNote}</p>
                ) : null}
                {step.example ? (
                  <pre className="mt-2 rounded bg-black/35 p-2 font-mono text-xs text-tardis-amber">
                    {step.example}
                  </pre>
                ) : null}
              </div>
            </li>
          ))}
        </ol>
      </section>
      {/* Input form: widget chosen by field.type (boolean/string/number/select). */}
      {spec.inputs.length > 0 ? (
        <section className="mc-panel p-5">
          <h2 className="text-lg font-semibold text-tardis-glow">Your inputs</h2>
          <div className="mt-4 space-y-5">
            {spec.inputs.map((field) => (
              <div key={field.name}>
                <label className="flex items-center gap-2 text-sm font-medium text-tardis-paper">
                  {field.type === 'boolean' ? (
                    <input
                      type="checkbox"
                      className="h-4 w-4 rounded border-tardis-glow/50 bg-black/40 text-tardis-bright"
                      checked={Boolean(inputs[field.name])}
                      onChange={(e) =>
                        setInputs((prev) => ({ ...prev, [field.name]: e.target.checked }))
                      }
                    />
                  ) : null}
                  <span>{field.label}</span>
                  <HelpTip title={field.label} body={field.help} example={field.example} />
                </label>
                {field.type === 'string' || field.type === 'number' ? (
                  <input
                    type={field.type === 'number' ? 'number' : 'text'}
                    className={cn(
                      'mt-2 w-full rounded-lg border border-white/15 bg-black/30 px-3 py-2 text-sm text-tardis-paper',
                      'focus:border-tardis-glow focus:outline-none focus:ring-1 focus:ring-tardis-glow',
                    )}
                    value={String(inputs[field.name] ?? '')}
                    onChange={(e) =>
                      setInputs((prev) => ({
                        ...prev,
                        [field.name]:
                          field.type === 'number' ? Number(e.target.value) : e.target.value,
                      }))
                    }
                  />
                ) : null}
                {field.type === 'select' && field.options ? (
                  <select
                    className="mt-2 w-full rounded-lg border border-white/15 bg-black/30 px-3 py-2 text-sm text-tardis-paper"
                    value={String(inputs[field.name] ?? field.options[0]?.value ?? '')}
                    onChange={(e) =>
                      setInputs((prev) => ({ ...prev, [field.name]: e.target.value }))
                    }
                  >
                    {field.options.map((o) => (
                      <option key={o.value} value={o.value}>
                        {o.label}
                      </option>
                    ))}
                  </select>
                ) : null}
              </div>
            ))}
          </div>
        </section>
      ) : null}
      {/* Launch section: single action button plus any submission error. */}
      <section className="mc-panel flex flex-col items-center gap-4 p-8">
        <p className="text-center text-sm text-tardis-paper/65">
          When you press the button, the system runs the real allowlisted script and records every step for
          your audit pack.
        </p>
        <GoButton onClick={onRun} loading={loading} disabled={loading} />
        {error ? (
          <p className="text-center text-sm text-red-400" role="alert">
            {error}
          </p>
        ) : null}
      </section>
    </div>
  );
}

View File

@@ -0,0 +1,13 @@
import Link from 'next/link';
/** Not-found page shown when a runbook id is not in the catalog. */
export default function RunbookNotFound() {
  return (
    <main className="mx-auto max-w-lg px-4 py-16 text-center">
      <h1 className="text-2xl font-bold text-tardis-paper">Runbook not found</h1>
      <p className="mt-2 text-tardis-paper/70">That procedure is not in the catalog.</p>
      <Link href="/runbooks" className="mt-6 inline-block text-tardis-glow underline">
        Back to runbooks
      </Link>
    </main>
  );
}

View File

@@ -0,0 +1,29 @@
import Link from 'next/link';
import { notFound } from 'next/navigation';
import type { RunbookSpec } from '@/lib/runbook-schema';
import { loadRunbookSpec } from '@/lib/load-specs';
import { RunbookRunner } from './RunbookRunner';
/**
 * Server component for /runbooks/[runbookId]: loads the spec server-side,
 * 404s when the id is unknown, and hands a plain-JSON copy of the spec to
 * the client-side RunbookRunner.
 */
export default async function RunbookPage({
  params,
}: {
  params: Promise<{ runbookId: string }>;
}) {
  const { runbookId } = await params;
  const spec = loadRunbookSpec(runbookId);
  if (!spec) {
    notFound();
  }
  /** Plain JSON so the client bundle never receives non-serializable values from Zod/parse. */
  const clientSpec = JSON.parse(JSON.stringify(spec)) as RunbookSpec;
  return (
    <main className="mx-auto max-w-3xl px-4 py-10">
      <Link href="/runbooks" className="text-sm text-tardis-glow hover:underline">
        All runbooks
      </Link>
      <RunbookRunner spec={clientSpec} />
    </main>
  );
}

View File

@@ -0,0 +1,255 @@
'use client';
import Link from 'next/link';
import { useParams } from 'next/navigation';
import { useEffect, useMemo, useState } from 'react';
import { Activity, CheckCircle2, Download, Shield, Terminal } from 'lucide-react';
import type { RunEvent } from '@/lib/run-events';
import { cn } from '@/lib/cn';
/**
 * Maps a run grade to the Tailwind classes for the status pill; unknown or
 * missing grades fall back to a neutral muted style.
 */
function gradeClass(g: string | null | undefined): string {
  switch (g) {
    case 'GREEN':
      return 'text-emerald-400 border-emerald-500/40 bg-emerald-500/10';
    case 'AMBER':
      return 'text-tardis-amber border-tardis-amber/40 bg-tardis-amber/10';
    case 'RED':
      return 'text-red-400 border-red-500/40 bg-red-500/10';
    default:
      return 'text-tardis-paper/60 border-white/20 bg-black/20';
  }
}
/**
 * Returns the terminal event (run_finished / run_error) closest to the end
 * of the list, or undefined while the run is still in flight.
 */
function findFinishEvent(
  events: RunEvent[],
): Extract<RunEvent, { type: 'run_finished' | 'run_error' }> | undefined {
  let idx = events.length;
  while (idx-- > 0) {
    const candidate = events[idx];
    if (candidate.type === 'run_finished' || candidate.type === 'run_error') {
      return candidate;
    }
  }
  return undefined;
}
/**
 * /runbooks/[runbookId]/run/[runId] — live view of one execution.
 *
 * Subscribes to the run's SSE endpoint and renders four panels: event
 * timeline, touchpoint grades, compliance assertions, and the raw
 * stdout/stderr log. Once a terminal event arrives, the audit-pack
 * download link appears.
 */
export default function RunLivePage() {
  const params = useParams<{ runbookId: string; runId: string }>();
  const runbookId = params.runbookId;
  const runId = params.runId;
  const [events, setEvents] = useState<RunEvent[]>([]);
  const [logError, setLogError] = useState<string | null>(null);
  // Subscribe to the SSE stream; de-duplicate by serialized value because
  // the server replays persisted history on (re)connect.
  useEffect(() => {
    const es = new EventSource(`/api/runs/${runId}/stream`);
    es.onmessage = (ev) => {
      try {
        const parsed = JSON.parse(ev.data) as RunEvent;
        setEvents((prev) => {
          const s = JSON.stringify(parsed);
          if (prev.some((p) => JSON.stringify(p) === s)) return prev;
          return [...prev, parsed];
        });
      } catch {
        /* ignore */
      }
    };
    es.onerror = () => {
      setLogError('Stream interrupted. Refresh the page if the run was still active.');
      es.close();
    };
    return () => es.close();
  }, [runId]);
  // Terminal event (if any) drives the grade pill and the summary line.
  const finish = useMemo(() => findFinishEvent(events), [events]);
  // run_error always renders as RED; null until a terminal event arrives.
  const overallGrade =
    finish?.type === 'run_finished'
      ? finish.overallGrade
      : finish?.type === 'run_error'
        ? 'RED'
        : null;
  const summary =
    finish?.type === 'run_finished' ? finish.summary : finish?.type === 'run_error' ? finish.message : null;
  // Per-panel event slices, narrowed by discriminant.
  const touchpoints = useMemo(
    () =>
      events.filter(
        (e): e is Extract<RunEvent, { type: 'touchpoint_result' }> => e.type === 'touchpoint_result',
      ),
    [events],
  );
  const compliance = useMemo(
    () =>
      events.filter(
        (e): e is Extract<RunEvent, { type: 'compliance_assertion' }> =>
          e.type === 'compliance_assertion',
      ),
    [events],
  );
  const logLines = useMemo(
    () =>
      events.filter(
        (e) => e.type === 'stdout_line' || e.type === 'stderr_line',
      ) as Array<Extract<RunEvent, { type: 'stdout_line' | 'stderr_line' }>>,
    [events],
  );
  const finished = Boolean(finish);
  return (
    <main className="mx-auto max-w-6xl px-4 py-10">
      <Link href={`/runbooks/${runbookId}`} className="text-sm text-tardis-glow hover:underline">
        Back to runbook
      </Link>
      {/* Header: run id, status pill, audit download once finished. */}
      <header className="mt-4 flex flex-wrap items-end justify-between gap-4">
        <div>
          <h1 className="text-2xl font-bold text-tardis-paper">Live run</h1>
          <p className="mt-1 font-mono text-xs text-tardis-glow/80">{runId}</p>
        </div>
        <div className="flex flex-wrap gap-2">
          <span
            className={cn(
              'rounded-lg border px-3 py-1 text-xs font-semibold uppercase tracking-wide',
              gradeClass(overallGrade),
            )}
          >
            {overallGrade ?? (events.length ? 'RUNNING' : 'CONNECTING')}
          </span>
          {finished ? (
            <a
              href={`/api/runs/${runId}/audit`}
              className="inline-flex items-center gap-2 rounded-lg border border-tardis-glow/40 bg-tardis-bright/30 px-4 py-2 text-sm font-semibold text-white transition hover:bg-tardis-bright/50"
            >
              <Download className="h-4 w-4" aria-hidden />
              Download audit pack (ZIP)
            </a>
          ) : null}
        </div>
      </header>
      {summary ? (
        <p className="mt-4 rounded-lg border border-white/10 bg-black/25 p-3 text-sm text-tardis-paper/85">
          {summary}
        </p>
      ) : null}
      {logError ? <p className="mt-2 text-sm text-tardis-amber">{logError}</p> : null}
      <div className="mt-8 grid gap-6 lg:grid-cols-2">
        {/* Timeline panel: every event except raw log lines. */}
        <section className="mc-panel p-4">
          <h2 className="flex items-center gap-2 text-lg font-semibold text-tardis-glow">
            <Activity className="h-5 w-5" aria-hidden />
            Live tracking
          </h2>
          <p className="mt-1 text-xs text-tardis-paper/55">
            Timeline of what the system is doing right now.
          </p>
          <ul className="mt-4 max-h-[420px] space-y-2 overflow-y-auto font-mono text-xs">
            {events.map((e, i) => {
              if (e.type === 'stdout_line' || e.type === 'stderr_line') return null;
              const label = (() => {
                switch (e.type) {
                  case 'run_queued':
                    return 'Queued';
                  case 'allowlist_verified':
                    return 'Allowlist OK';
                  case 'step_started':
                    return `Step ${e.stepIndex + 1}/${e.stepTotal} start`;
                  case 'step_finished':
                    return `Step ${e.stepIndex + 1}/${e.stepTotal} done (exit ${e.exitCode})`;
                  case 'process_spawned':
                    return 'Started process';
                  case 'touchpoint_result':
                    return `Touchpoint: ${e.touchpointId}`;
                  case 'compliance_assertion':
                    return `Compliance: ${e.controlId}`;
                  case 'run_finished':
                    return 'Finished';
                  case 'run_error':
                    return 'Error';
                  default: {
                    // Exhaustiveness check: compile error if a RunEvent
                    // variant is added without a label here.
                    const _x: never = e;
                    return _x;
                  }
                }
              })();
              return (
                <li
                  key={i}
                  className="rounded border border-white/10 bg-black/25 px-2 py-1.5 text-tardis-paper/85"
                >
                  <span className="text-tardis-glow/70">{e.ts}</span> · {label}
                  {e.type === 'run_finished' ? ` · exit ${e.exitCode}` : null}
                  {e.type === 'run_error' ? ` · ${e.message}` : null}
                </li>
              );
            })}
          </ul>
        </section>
        {/* Evidence panel: graded touchpoints and compliance assertions. */}
        <section className="mc-panel p-4">
          <h2 className="flex items-center gap-2 text-lg font-semibold text-tardis-glow">
            <Shield className="h-5 w-5" aria-hidden />
            Data & compliance
          </h2>
          <p className="mt-1 text-xs text-tardis-paper/55">
            Every check is graded. Green means the evidence matched what we expected.
          </p>
          <h3 className="mt-4 text-xs font-semibold uppercase tracking-wide text-tardis-amber">
            Touchpoints
          </h3>
          <ul className="mt-2 max-h-48 space-y-2 overflow-y-auto text-sm">
            {touchpoints.map((t, i) => (
              <li
                key={`${t.touchpointId}-${i}`}
                className={cn(
                  'flex items-start gap-2 rounded border px-2 py-1.5',
                  t.grade === 'GREEN'
                    ? 'border-emerald-500/35 bg-emerald-500/10'
                    : t.grade === 'RED'
                      ? 'border-red-500/35 bg-red-500/10'
                      : 'border-tardis-amber/35 bg-tardis-amber/10',
                )}
              >
                <CheckCircle2 className="mt-0.5 h-4 w-4 shrink-0 text-tardis-glow" aria-hidden />
                <div>
                  <p className="font-medium text-tardis-paper">{t.touchpointId}</p>
                  <p className="text-xs text-tardis-paper/65">{t.evidence}</p>
                </div>
              </li>
            ))}
          </ul>
          <h3 className="mt-4 text-xs font-semibold uppercase tracking-wide text-tardis-amber">
            Compliance assertions
          </h3>
          <ul className="mt-2 max-h-40 space-y-2 overflow-y-auto text-sm">
            {compliance.map((c, i) => (
              <li
                key={`${c.controlId}-${i}`}
                className={cn(
                  'rounded border px-2 py-1.5',
                  c.satisfied ? 'border-emerald-500/30 bg-emerald-500/10' : 'border-red-500/30 bg-red-500/10',
                )}
              >
                <p className="font-mono text-xs text-tardis-glow">{c.controlId}</p>
                <p className="text-xs text-tardis-paper/70">{c.evidence}</p>
              </li>
            ))}
          </ul>
        </section>
      </div>
      {/* Raw process output; stderr lines tinted amber. */}
      <section className="mc-panel mt-6 p-4">
        <h2 className="flex items-center gap-2 text-lg font-semibold text-tardis-glow">
          <Terminal className="h-5 w-5" aria-hidden />
          Technical log (stdout / stderr)
        </h2>
        <pre className="mt-3 max-h-80 overflow-auto rounded-lg bg-black/50 p-3 font-mono text-[11px] leading-relaxed text-tardis-paper/90">
          {logLines.map((l, i) => (
            <span key={i} className={l.type === 'stderr_line' ? 'text-tardis-amber' : undefined}>
              {l.line}
              {'\n'}
            </span>
          ))}
        </pre>
      </section>
    </main>
  );
}

View File

@@ -0,0 +1,64 @@
import Link from 'next/link';
import { ChevronRight, BookOpen } from 'lucide-react';
import { loadAllRunbookSpecs } from '@/lib/load-specs';
/**
 * /runbooks — server-rendered catalog listing. Catalog load failures are
 * caught and surfaced as an inline alert (with remediation hints) instead of
 * crashing the page.
 */
export default function RunbooksIndexPage() {
  let specs: ReturnType<typeof loadAllRunbookSpecs>;
  let catalogError: string | null = null;
  try {
    specs = loadAllRunbookSpecs();
  } catch (e) {
    // Log server-side, then render an empty list plus the error banner below.
    console.error('[mission-control] Failed to load runbook catalog:', e);
    specs = [];
    catalogError = e instanceof Error ? e.message : String(e);
  }
  return (
    <main className="mx-auto max-w-4xl px-4 py-10">
      <div className="mb-8">
        <Link href="/" className="text-sm text-tardis-glow hover:underline">
          Home
        </Link>
        <h1 className="mt-4 flex items-center gap-3 text-3xl font-bold text-tardis-paper">
          <BookOpen className="h-8 w-8 text-tardis-glow" aria-hidden />
          Runbooks
        </h1>
        <p className="mt-2 text-tardis-paper/70">
          Pick a procedure. Each page explains what it does in plain language, asks only what is needed, and
          records proof when you run it.
        </p>
      </div>
      {catalogError ? (
        <div
          className="mb-6 rounded-lg border border-red-500/40 bg-red-950/40 p-4 text-sm text-red-200"
          role="alert"
        >
          <p className="font-semibold">Runbook catalog could not be loaded</p>
          <p className="mt-2 font-mono text-xs opacity-90">{catalogError}</p>
          <p className="mt-2 text-tardis-paper/80">
            Check <code className="text-tardis-glow">MISSION_CONTROL_PROJECT_ROOT</code>, regenerate{' '}
            <code className="text-tardis-glow">runbooks/doc-manifest.json</code>, and see the server log.
          </p>
        </div>
      ) : null}
      {/* One card per runbook, linking into its guided page. */}
      <ul className="space-y-3">
        {specs.map((s) => (
          <li key={s.id}>
            <Link
              href={`/runbooks/${s.id}`}
              className="mc-panel flex items-center justify-between gap-4 p-4 transition hover:border-tardis-glow/50"
            >
              <div>
                <p className="font-semibold text-tardis-paper">{s.title}</p>
                <p className="mt-1 text-sm text-tardis-paper/65">{s.summary}</p>
              </div>
              <ChevronRight className="h-5 w-5 shrink-0 text-tardis-glow" aria-hidden />
            </Link>
          </li>
        ))}
      </ul>
    </main>
  );
}

View File

@@ -0,0 +1,29 @@
'use client';
import { cn } from '@/lib/cn';
/** Props for the primary launch button. */
type Props = {
  disabled?: boolean;
  loading?: boolean;
  onClick: () => void;
  className?: string;
};

/**
 * The big red launch button. Disabled while loading (or when explicitly
 * disabled) and swaps its label to 'Running…' during execution.
 */
export function GoButton({ disabled, loading, onClick, className }: Props) {
  return (
    <button
      type="button"
      disabled={disabled || loading}
      onClick={onClick}
      className={cn(
        'rounded-lg px-8 py-4 text-lg font-bold tracking-wide text-white shadow-lg transition',
        'bg-red-600 hover:bg-red-500 hover:shadow-red-500/40',
        'disabled:cursor-not-allowed disabled:opacity-50',
        'focus:outline-none focus:ring-4 focus:ring-red-300/50',
        className,
      )}
    >
      {loading ? 'Running…' : 'GO BABY GO!'}
    </button>
  );
}

View File

@@ -0,0 +1,62 @@
'use client';
import { HelpCircle } from 'lucide-react';
import { useCallback, useEffect, useId, useRef, useState } from 'react';
import { cn } from '@/lib/cn';
/** Props: tooltip heading, body copy, optional monospace example block. */
type Props = {
  title: string;
  body: string;
  example?: string;
  className?: string;
};

/**
 * Inline "?" help button that toggles a small tooltip panel. The panel closes
 * on any document click outside the component; the listener is attached only
 * while the panel is open.
 */
export function HelpTip({ title, body, example, className }: Props) {
  const [open, setOpen] = useState(false);
  const rootRef = useRef<HTMLSpanElement>(null);
  const panelId = useId();
  const close = useCallback(() => setOpen(false), []);
  // Outside-click dismissal, active only while open.
  useEffect(() => {
    if (!open) return;
    const onDoc = (e: MouseEvent) => {
      if (!rootRef.current?.contains(e.target as Node)) close();
    };
    document.addEventListener('click', onDoc);
    return () => document.removeEventListener('click', onDoc);
  }, [open, close]);
  return (
    <span ref={rootRef} className={cn('relative inline-flex align-middle', className)}>
      <button
        type="button"
        className="ml-1 rounded-full border border-tardis-glow/40 bg-tardis-panel/80 p-0.5 text-tardis-glow transition hover:bg-tardis-bright/30 hover:shadow-tardis focus:outline-none focus:ring-2 focus:ring-tardis-glow"
        aria-expanded={open}
        aria-controls={panelId}
        aria-label={`Help: ${title}`}
        onClick={(e) => {
          // stopPropagation keeps the toggle click away from the
          // document-level outside-click handler above.
          e.stopPropagation();
          setOpen((o) => !o);
        }}
      >
        <HelpCircle className="h-4 w-4" aria-hidden />
      </button>
      {open ? (
        <span
          id={panelId}
          role="tooltip"
          className="absolute left-0 top-full z-50 mt-2 w-72 rounded-lg border border-tardis-glow/30 bg-tardis-deep/98 p-3 text-left text-xs text-tardis-paper shadow-tardis"
        >
          <p className="font-semibold text-tardis-glow">{title}</p>
          <p className="mt-2 leading-relaxed text-tardis-paper/90">{body}</p>
          {example ? (
            <pre className="mt-2 max-h-32 overflow-auto rounded bg-black/40 p-2 font-mono text-[10px] text-tardis-amber">
              {example}
            </pre>
          ) : null}
        </span>
      ) : null}
    </span>
  );
}

View File

@@ -0,0 +1,9 @@
import type { RunbookSpec } from '@/lib/runbook-schema';
import { validateRunbookExecution } from '@/lib/execution-path-validator';
/**
 * Ensures every script path referenced by the runbook's execution steps is
 * allowlisted: relative, inside the repository, under an approved prefix, and
 * present on disk. Throws on the first violating step; returns nothing on
 * success. Thin naming wrapper over validateRunbookExecution.
 */
export function assertRunbookAllowlisted(spec: RunbookSpec, repoRoot: string): void {
  validateRunbookExecution(repoRoot, spec);
}

View File

@@ -0,0 +1,91 @@
import fs from 'node:fs';
import path from 'node:path';
import { createHash } from 'node:crypto';
import { execSync } from 'node:child_process';
import archiver from 'archiver';
import { getJobStore } from '@/lib/job-store';
import { loadRunbookSpec } from '@/lib/load-specs';
import { getProjectRoot } from '@/lib/paths';
/** Best-effort HEAD commit hash for provenance; 'unknown' outside a git checkout. */
function gitSha(root: string): string {
  let sha = 'unknown';
  try {
    sha = execSync('git rev-parse HEAD', { cwd: root, encoding: 'utf8' }).trim();
  } catch {
    // Not a git repo (or git unavailable) — provenance degrades to 'unknown'.
  }
  return sha;
}
/** Hex SHA-256 digest of a file's full contents (read synchronously). */
function sha256File(filePath: string): string {
  const hasher = createHash('sha256');
  hasher.update(fs.readFileSync(filePath));
  return hasher.digest('hex');
}
/**
 * Builds the immutable audit bundle (ZIP) for a completed run.
 *
 * Bundle layout:
 *   manifest.json     — run/runbook identity, git commit, timestamps
 *   payload/<file>    — evidence files copied from the run directory
 *   checksums.sha256  — SHA-256 of each payload file for integrity checks
 *
 * Throws Error('Run not found') when the run id has no stored metadata.
 */
export async function buildAuditZipBuffer(runId: string): Promise<Buffer> {
  const store = getJobStore();
  const meta = store.readMeta(runId);
  if (!meta) {
    throw new Error('Run not found');
  }
  const dir = store.runDir(runId);
  const root = getProjectRoot();
  const spec = loadRunbookSpec(meta.runbookId);
  // Provenance manifest; gitCommit is best-effort ('unknown' outside git).
  const manifest = {
    schema: 'mission-control.audit-bundle.v1',
    runId,
    runbookId: meta.runbookId,
    runbookTitle: spec?.title ?? meta.runbookId,
    createdAtUtc: new Date().toISOString(),
    repositoryRoot: root,
    gitCommit: gitSha(root),
    missionControlVersion: '1.0.0',
    meta,
    integrityNote:
      'SHA-256 checksums for payload files are listed in checksums.sha256. Retain this bundle for audit.',
    complianceFramework: spec?.complianceFramework ?? 'DBIS-MC-INTERNAL-1',
  };
  // Collect the zip output in memory; 'end' fires once all data is flushed.
  const chunks: Buffer[] = [];
  const archive = archiver('zip', { zlib: { level: 9 } });
  archive.on('data', (c: Buffer) => chunks.push(c));
  const done = new Promise<void>((resolve, reject) => {
    archive.on('end', resolve);
    archive.on('error', reject);
  });
  archive.append(JSON.stringify(manifest, null, 2), { name: 'manifest.json' });
  // Only files that exist are bundled — partial runs yield partial bundles.
  const filesToHash: { name: string; abs: string }[] = [];
  for (const name of [
    'events.jsonl',
    'inputs.redacted.json',
    'touchpoints.final.json',
    'compliance.json',
    'stdout.log',
    'stderr.log',
    'meta.json',
  ]) {
    const abs = path.join(dir, name);
    if (fs.existsSync(abs)) {
      archive.file(abs, { name: `payload/${name}` });
      filesToHash.push({ name, abs });
    }
  }
  const checksumLines: string[] = [];
  for (const f of filesToHash) {
    checksumLines.push(`${sha256File(f.abs)} ${f.name}`);
  }
  archive.append(checksumLines.join('\n') + '\n', { name: 'checksums.sha256' });
  await archive.finalize();
  await done;
  return Buffer.concat(chunks);
}

View File

@@ -0,0 +1,6 @@
import { clsx, type ClassValue } from 'clsx';
import { twMerge } from 'tailwind-merge';
/**
 * Merges class values: clsx flattens/conditionally joins the inputs, then
 * twMerge resolves conflicting Tailwind utilities (later classes win).
 */
export function cn(...inputs: ClassValue[]) {
  return twMerge(clsx(inputs));
}

View File

@@ -0,0 +1,23 @@
import type { RunbookSpec } from '@/lib/runbook-schema';
/**
 * Coerces raw (often stringly-typed) input values to the types declared by
 * the runbook spec's input fields.
 *
 * - Fills in each field's default when the value is missing.
 * - boolean fields: 'true'/'false' strings become real booleans.
 * - number fields: numeric strings become numbers. Blank / whitespace-only
 *   strings are left untouched — Number('') is 0, which would otherwise turn
 *   an empty form field into a real numeric value silently.
 *
 * Keys not declared in the spec pass through unchanged.
 */
export function coerceRunbookInputs(
  spec: RunbookSpec,
  raw: Record<string, unknown>,
): Record<string, unknown> {
  const out: Record<string, unknown> = { ...raw };
  for (const field of spec.inputs) {
    if (out[field.name] === undefined && field.default !== undefined) {
      out[field.name] = field.default;
    }
    if (field.type === 'boolean') {
      const v = out[field.name];
      if (v === 'true' || v === true) out[field.name] = true;
      else if (v === 'false' || v === false) out[field.name] = false;
    }
    if (field.type === 'number' && typeof out[field.name] === 'string') {
      const s = (out[field.name] as string).trim();
      // Only coerce non-blank strings; keep non-numeric text as-is so the
      // executor's validation (not silent 0/NaN) deals with it.
      if (s.length > 0) {
        const n = Number(s);
        if (!Number.isNaN(n)) out[field.name] = n;
      }
    }
  }
  return out;
}

View File

@@ -0,0 +1,46 @@
import fs from 'node:fs';
import path from 'node:path';
import type { RunbookSpec } from '@/lib/runbook-schema';
/** Directory prefixes under repo root (no leading/trailing slash). */
const ALLOWED_PREFIXES = ['scripts', 'explorer-monorepo/scripts'] as const;

/**
 * Canonicalizes a repo-relative script path: strips one leading './', then
 * converts backslashes to forward slashes. Rejects absolute paths and any
 * path containing '..' (conservative traversal guard).
 */
function normalizeRelative(p: string): string {
  const cleaned = p.replace(/^\.\//, '').replace(/\\/g, '/');
  if (path.isAbsolute(cleaned) || cleaned.includes('..')) {
    throw new Error(`Unsafe script path: ${p}`);
  }
  return cleaned;
}
/**
 * Validates one repo-relative script path and returns its absolute path.
 *
 * Checks, in order:
 *  1. normalizeRelative — rejects absolute paths and any '..' segment.
 *  2. Allowlist — path must equal, or live under, one of ALLOWED_PREFIXES.
 *  3. Containment — the resolved path must stay inside repoRoot
 *     (defense in depth on top of check 1).
 *  4. Existence — the script file must actually exist on disk.
 *
 * Throws Error with a descriptive message on the first failed check.
 */
export function assertScriptPathAllowed(repoRoot: string, scriptRelative: string): string {
  const rel = normalizeRelative(scriptRelative);
  const ok = ALLOWED_PREFIXES.some(
    (pre) => rel === pre || rel.startsWith(`${pre}/`),
  );
  if (!ok) {
    throw new Error(
      `Script path not allowlisted (must be under ${ALLOWED_PREFIXES.join(' or ')}): ${rel}`,
    );
  }
  const abs = path.resolve(repoRoot, rel);
  const rootResolved = path.resolve(repoRoot);
  const relToRoot = path.relative(rootResolved, abs);
  // Outside the root when the relative path climbs upward, or is absolute
  // (path.relative can return an absolute path across Windows drive letters).
  const outside =
    relToRoot.startsWith(`..${path.sep}`) ||
    relToRoot === '..' ||
    path.isAbsolute(relToRoot);
  if (outside) {
    throw new Error(`Script resolves outside repository: ${rel}`);
  }
  if (!fs.existsSync(abs)) {
    throw new Error(`Script not found: ${rel}`);
  }
  return abs;
}
/**
 * Validates every execution step's script path against the allowlist,
 * throwing on the first violation.
 */
export function validateRunbookExecution(repoRoot: string, spec: RunbookSpec): void {
  spec.execution.steps.forEach((step) => {
    assertScriptPathAllowed(repoRoot, step.scriptRelative);
  });
}

View File

@@ -0,0 +1,107 @@
import path from 'node:path';
import fs from 'node:fs';
import type { RunbookSpec, ExecutionStep } from '@/lib/runbook-schema';
import { assertScriptPathAllowed } from '@/lib/execution-path-validator';
/** A fully resolved, spawn-ready command for one runbook step. */
export type ResolvedCommand = {
  program: string; // interpreter: process.execPath, a bash path, or bare 'bash'
  args: string[]; // [absolute script path, ...substituted step args]
  cwd: string; // working directory for the child (repo root in practice)
  shell: boolean; // passed straight to spawn(); built as false by buildExecutionPlan
};
/**
 * Locates a bash executable. On Windows, prefers GIT_BASH_PATH then the
 * standard Git for Windows install locations; elsewhere, plain 'bash'.
 */
function resolveBash(): string {
  if (process.platform !== 'win32') return 'bash';
  const candidates = [
    process.env.GIT_BASH_PATH,
    'C:\\Program Files\\Git\\bin\\bash.exe',
    'C:\\Program Files (x86)\\Git\\bin\\bash.exe',
  ];
  for (const candidate of candidates) {
    if (candidate && fs.existsSync(candidate)) return candidate;
  }
  // Last resort: hope 'bash' is on PATH (checkBashAvailable catches this case).
  return 'bash';
}
/** Replaces `{{key}}` placeholders with stringified input values ('' for missing/null). */
function substituteTemplates(s: string, inputs: Record<string, unknown>): string {
  return s.replace(/\{\{(\w+)\}\}/g, (_match, key: string) => {
    const value = inputs[key];
    return value === undefined || value === null ? '' : String(value);
  });
}
/**
 * True for boolean true or any non-blank string (including 'true').
 * Note: the string 'false' is non-blank and therefore counts as truthy here;
 * boolean inputs are expected to be coerced to real booleans upstream.
 */
function isTruthyInput(v: unknown): boolean {
  if (v === true) return true;
  return typeof v === 'string' && v.trim().length > 0;
}
/**
 * Builds the final argv for one step: templated base args, conditional flags
 * from whenInputTrue, and an automatic --dry-run when practice mode is on and
 * the step supports it.
 */
function buildArgsForStep(
  step: ExecutionStep,
  inputs: Record<string, unknown>,
): string[] {
  const argv = (step.args ?? []).map((arg) => substituteTemplates(arg, inputs));
  const conditional: string[] = [];
  for (const [inputName, flags] of Object.entries(step.whenInputTrue ?? {})) {
    if (!isTruthyInput(inputs[inputName])) continue;
    conditional.push(...flags.map((flag) => substituteTemplates(flag, inputs)));
  }
  const practiceMode =
    inputs.practiceMode === true || inputs.practiceMode === 'true';
  if (practiceMode && step.supportsDryRun && !conditional.includes('--dry-run')) {
    conditional.push('--dry-run');
  }
  return argv.concat(conditional);
}
/**
 * Resolves every execution step to a concrete spawn command (allowlisted paths only).
 * Node steps run under the current Node binary; everything else runs under bash.
 * shell is always false so argv is passed verbatim (no shell interpolation).
 */
export function buildExecutionPlan(
  spec: RunbookSpec,
  inputs: Record<string, unknown>,
  repoRoot: string,
): ResolvedCommand[] {
  const bashProgram = resolveBash();
  return spec.execution.steps.map((step) => {
    // Throws when the script is not allowlisted, escapes the repo, or is missing.
    const absScript = assertScriptPathAllowed(repoRoot, step.scriptRelative);
    const stepArgs = buildArgsForStep(step, inputs);
    const program = step.interpreter === 'node' ? process.execPath : bashProgram;
    return {
      program,
      args: [absScript, ...stepArgs],
      cwd: repoRoot,
      shell: false,
    };
  });
}
/**
 * Preflight heuristic: returns false only when we can tell up front that the
 * configured bash cannot be spawned (bare 'bash' on Windows, or a bash.exe
 * path that does not exist). Node commands always pass.
 */
export function checkBashAvailable(cmd: ResolvedCommand): boolean {
  if (cmd.program === process.execPath) return true;
  const program = cmd.program;
  const isBareBash = program === 'bash' || program === 'bash.exe';
  if (isBareBash && process.platform === 'win32') return false;
  if (/bash\.exe$/i.test(program)) return fs.existsSync(program);
  return true;
}

View File

@@ -0,0 +1,36 @@
import { describe, it, expect, beforeEach } from 'vitest';
import { getJobStore } from '@/lib/job-store';
import { queueRun } from '@/lib/executor';
/** Polls the job store until the run reaches a terminal status or maxMs elapses. */
async function waitForTerminal(runId: string, maxMs = 45_000): Promise<void> {
  const store = getJobStore();
  const deadline = Date.now() + maxMs;
  while (Date.now() < deadline) {
    const meta = store.readMeta(runId);
    const status = meta?.status;
    if (status === 'succeeded' || status === 'failed' || status === 'error') {
      return;
    }
    // Short fixed-interval poll; the executor writes meta.json as it progresses.
    await new Promise((resolve) => setTimeout(resolve, 150));
  }
  throw new Error(`Run ${runId} did not finish within ${maxMs}ms`);
}
// Integration test: queues a real runbook run through the executor and waits
// for a terminal state; exercises the job store end to end.
describe('mission-control executor', () => {
  beforeEach(() => {
    // Fresh job store per test file run in same process — UUIDs avoid collision
  });
  it('runs health-self-check to success', async () => {
    // Queue the allowlisted self-check runbook with no inputs.
    const { runId } = queueRun('health-self-check', {});
    await waitForTerminal(runId);
    const meta = getJobStore().readMeta(runId);
    expect(meta?.status).toBe('succeeded');
    expect(meta?.overallGrade).toBe('GREEN');
    // At least one stdout line should have been streamed as an event.
    const events = getJobStore().readEvents(runId);
    expect(events.some((e) => e.type === 'stdout_line')).toBe(true);
  });
});

View File

@@ -0,0 +1,382 @@
import { spawn } from 'node:child_process';
import fs from 'node:fs';
import { assertRunbookAllowlisted } from '@/lib/allowlist';
import { getJobStore, type RunMeta } from '@/lib/job-store';
import type { RunEvent } from '@/lib/run-events';
import { loadRunbookSpec } from '@/lib/load-specs';
import { redactInputs } from '@/lib/redact';
import { createTouchpointTracker } from '@/lib/touchpoint-evaluator';
import { ensureDirSync, getProjectRoot, getRunDataDir } from '@/lib/paths';
import type { RunbookSpec } from '@/lib/runbook-schema';
import { buildExecutionPlan, checkBashAvailable, type ResolvedCommand } from '@/lib/execution-plan';
/** Current timestamp as an ISO-8601 UTC string (used on every run event). */
function nowIso(): string {
  const now = new Date();
  return now.toISOString();
}
/** Truncates argv entries longer than 120 chars so logged args can't leak oversized payloads. */
function redactArgs(args: string[]): string[] {
  const redacted: string[] = [];
  for (const arg of args) {
    redacted.push(arg.length > 120 ? `${arg.slice(0, 40)}...[truncated]` : arg);
  }
  return redacted;
}
/**
 * Maps run outcome to a traffic-light grade:
 * thrown error or non-zero exit => RED; touchpoint failure => AMBER; else GREEN.
 */
function overallGradeFrom(
  exit: number | null,
  touchFail: boolean,
  threw: boolean,
): 'GREEN' | 'AMBER' | 'RED' {
  if (threw || exit !== 0) return 'RED';
  return touchFail ? 'AMBER' : 'GREEN';
}
/**
 * Projects touchpoint outcomes into compliance-control rows.
 * Control IDs derive from the spec id: uppercased, runs of non-alphanumerics
 * folded to '_', prefixed 'MC-' and suffixed with the touchpoint id.
 */
function complianceRows(
  spec: RunbookSpec,
  touchpoints: { id: string; status: string; evidence: string; grade: string }[],
): { controlId: string; framework: string; satisfied: boolean; evidence: string }[] {
  const specTag = spec.id.toUpperCase().replace(/[^A-Z0-9]+/g, '_');
  const rows: { controlId: string; framework: string; satisfied: boolean; evidence: string }[] = [];
  for (const tp of touchpoints) {
    rows.push({
      controlId: `MC-${specTag}-${tp.id}`,
      framework: spec.complianceFramework,
      // Satisfied only when the touchpoint passed and was not graded RED.
      satisfied: tp.status === 'PASS' && tp.grade !== 'RED',
      evidence: tp.evidence,
    });
  }
  return rows;
}
/**
 * Builds the child process environment: inherits process.env, pins
 * PROJECT_ROOT, and forwards optional proxmoxHost / rpcUrlOverride inputs
 * (trimmed; blank values are ignored).
 */
function buildChildEnv(
  root: string,
  rawInputs: Record<string, unknown>,
): NodeJS.ProcessEnv {
  const childEnv: NodeJS.ProcessEnv = { ...process.env, PROJECT_ROOT: root };
  const host = rawInputs.proxmoxHost;
  const hostText = host === undefined || host === null ? '' : String(host).trim();
  if (hostText !== '') {
    childEnv.PROXMOX_HOST = hostText;
  }
  const rpcOverride = rawInputs.rpcUrlOverride;
  if (typeof rpcOverride === 'string' && rpcOverride.trim() !== '') {
    childEnv.RPC_URL_138 = rpcOverride.trim();
  }
  return childEnv;
}
/**
 * Spawns one resolved command and streams its output into the run's event log.
 *
 * Each stdout/stderr chunk is forwarded raw to the corresponding callback
 * (used for snapshots and touchpoint pattern matching) and also split on '\n'
 * into per-line events. NOTE(review): a line that spans two chunks is emitted
 * as two separate *_line events — fine for display; pattern matching should
 * use the raw-chunk callbacks, as the executor does.
 *
 * Resolves with the child's exit code (null when killed by a signal);
 * rejects on spawn-level failure (e.g. program not found).
 */
function runOneCommand(
  runId: string,
  store: ReturnType<typeof getJobStore>,
  cmd: ResolvedCommand,
  env: NodeJS.ProcessEnv,
  onStdout: (chunk: string) => void,
  onStderr: (chunk: string) => void,
): Promise<number | null> {
  return new Promise((resolvePromise, rejectPromise) => {
    const child = spawn(cmd.program, cmd.args, {
      cwd: cmd.cwd,
      shell: cmd.shell,
      env,
    });
    child.stdout?.on('data', (d: Buffer) => {
      const chunk = d.toString('utf8');
      onStdout(chunk);
      // Persist non-empty lines individually so the UI can stream them.
      for (const line of chunk.split('\n')) {
        if (line.length === 0) continue;
        store.appendEvent(runId, { type: 'stdout_line', ts: nowIso(), line });
      }
    });
    child.stderr?.on('data', (d: Buffer) => {
      const chunk = d.toString('utf8');
      onStderr(chunk);
      for (const line of chunk.split('\n')) {
        if (line.length === 0) continue;
        store.appendEvent(runId, { type: 'stderr_line', ts: nowIso(), line });
      }
    });
    child.on('error', (err) => {
      // Spawn-level failure (e.g. ENOENT) — surfaces as a rejected promise.
      rejectPromise(err);
    });
    child.on('close', (code) => {
      resolvePromise(code);
    });
  });
}
/**
 * Runs every step of an allowlisted runbook, streaming events and artifacts
 * into the job store, then writes a terminal meta record.
 *
 * Terminal statuses written here:
 *  - 'error'     — preflight failed (allowlist, bash, missing script) or an
 *                  exception escaped the step loop
 *  - 'succeeded' — all steps exited 0 and no touchpoint FAILed
 *  - 'failed'    — non-zero exit, or exit 0 with a failing touchpoint
 *
 * Throws only when the run's meta record is missing (before the try block);
 * all other failures are recorded via the store, not rethrown.
 */
export async function executeRunbook(
  runId: string,
  spec: RunbookSpec,
  rawInputs: Record<string, unknown>,
): Promise<void> {
  const store = getJobStore();
  const root = getProjectRoot();
  let meta = store.readMeta(runId);
  if (!meta) {
    // No meta means queueRun's persistence never happened — caller must handle.
    throw new Error('Run meta missing');
  }
  try {
    // Re-verify allowlisting at execution time (defense in depth vs. queueRun).
    assertRunbookAllowlisted(spec, root);
    const detail =
      (spec.executionNote ? `${spec.executionNote} ` : '') +
      `${spec.execution.steps.length} allowlisted step(s).`;
    store.appendEvent(runId, {
      type: 'allowlist_verified',
      ts: nowIso(),
      detail,
    });
    const plan = buildExecutionPlan(spec, rawInputs, root);
    const childEnv = buildChildEnv(root, rawInputs);
    // Preflight: fail fast with status 'error' if any step needs a bash we
    // cannot find on this platform (Windows without Git Bash).
    for (const cmd of plan) {
      if (!checkBashAvailable(cmd)) {
        store.appendEvent(runId, {
          type: 'run_error',
          ts: nowIso(),
          message:
            'Git Bash not found. Install Git for Windows, set GIT_BASH_PATH to bash.exe, or run from WSL/Linux.',
          overallGrade: 'RED',
        });
        const bashErr: RunMeta = {
          id: runId,
          runbookId: meta.runbookId,
          createdAt: meta.createdAt,
          startedAt: meta.startedAt,
          status: 'error',
          finishedAt: nowIso(),
          exitCode: null,
          overallGrade: 'RED',
          summary: 'Bash missing on Windows',
        };
        store.writeMeta(runId, bashErr);
        return;
      }
    }
    // Transition queued -> running before the first spawn.
    const running: RunMeta = {
      id: runId,
      runbookId: meta.runbookId,
      createdAt: meta.createdAt,
      status: 'running',
      startedAt: nowIso(),
      exitCode: null,
      overallGrade: null,
    };
    meta = running;
    store.writeMeta(runId, running);
    const tracker = createTouchpointTracker(spec);
    let stdoutBuf = '';
    let stderrBuf = '';
    let lastCode: number | null = 0;
    for (let i = 0; i < plan.length; i++) {
      const cmd = plan[i];
      const scriptArg = cmd.args[0] ?? '';
      // Belt-and-braces re-check: buildExecutionPlan already verified the
      // script exists, but it may have been removed since planning.
      if (
        (cmd.program.includes('bash') || scriptArg.endsWith('.sh')) &&
        scriptArg &&
        !fs.existsSync(scriptArg)
      ) {
        store.appendEvent(runId, {
          type: 'run_error',
          ts: nowIso(),
          message: `Script not found: ${scriptArg}`,
          overallGrade: 'RED',
        });
        const miss: RunMeta = {
          id: runId,
          runbookId: meta.runbookId,
          createdAt: meta.createdAt,
          startedAt: meta.startedAt,
          status: 'error',
          finishedAt: nowIso(),
          exitCode: null,
          overallGrade: 'RED',
          summary: 'Script missing',
        };
        store.writeMeta(runId, miss);
        return;
      }
      store.appendEvent(runId, {
        type: 'step_started',
        ts: nowIso(),
        stepIndex: i,
        stepTotal: plan.length,
        scriptRelative: spec.execution.steps[i]?.scriptRelative ?? '',
        program: cmd.program,
        argsRedacted: redactArgs(cmd.args),
      });
      store.appendEvent(runId, {
        type: 'process_spawned',
        ts: nowIso(),
        program: cmd.program,
        argsRedacted: redactArgs(cmd.args),
        cwd: cmd.cwd,
      });
      try {
        lastCode = await runOneCommand(
          runId,
          store,
          cmd,
          childEnv,
          (chunk) => {
            // Accumulate raw stdout for the snapshot and feed the touchpoint
            // matcher with unsplit chunks.
            stdoutBuf += chunk;
            tracker.ingestStdout(chunk);
          },
          (chunk) => {
            stderrBuf += chunk;
          },
        );
      } catch (spawnErr) {
        // Record the failed step, then let the outer catch finalize the run.
        const msg = spawnErr instanceof Error ? spawnErr.message : String(spawnErr);
        store.appendEvent(runId, {
          type: 'step_finished',
          ts: nowIso(),
          stepIndex: i,
          stepTotal: plan.length,
          exitCode: null,
          error: msg,
        });
        throw spawnErr;
      }
      store.appendEvent(runId, {
        type: 'step_finished',
        ts: nowIso(),
        stepIndex: i,
        stepTotal: plan.length,
        exitCode: lastCode,
      });
      // Stop at the first non-zero exit; later steps are skipped.
      if (lastCode !== 0) {
        break;
      }
    }
    store.writeStdoutSnapshot(runId, stdoutBuf);
    store.writeStderrSnapshot(runId, stderrBuf);
    // Settle exit_zero touchpoints and fail anything still pending.
    const finalTp = tracker.finalize(lastCode);
    store.writeTouchpoints(runId, finalTp);
    let touchFail = false;
    for (const t of finalTp) {
      if (t.status === 'FAIL') touchFail = true;
      store.appendEvent(runId, {
        type: 'touchpoint_result',
        ts: nowIso(),
        touchpointId: t.id,
        status: t.status,
        evidence: t.evidence,
        grade: t.grade,
      });
    }
    const comp = complianceRows(spec, finalTp);
    store.writeCompliance(runId, comp);
    for (const row of comp) {
      store.appendEvent(runId, {
        type: 'compliance_assertion',
        ts: nowIso(),
        controlId: row.controlId,
        framework: row.framework,
        satisfied: row.satisfied,
        evidence: row.evidence,
      });
    }
    const grade = overallGradeFrom(lastCode, touchFail, false);
    const summary =
      lastCode === 0 && !touchFail
        ? `All ${plan.length} step(s) completed successfully; touchpoints satisfied.`
        : lastCode !== 0
        ? `Stopped after non-zero exit (code ${lastCode}).`
        : 'Exit 0 but one or more touchpoints require review.';
    store.appendEvent(runId, {
      type: 'run_finished',
      ts: nowIso(),
      exitCode: lastCode,
      overallGrade: grade,
      summary,
    });
    const ok = lastCode === 0 && !touchFail;
    const m = meta as RunMeta;
    const finished: RunMeta = {
      id: runId,
      runbookId: m.runbookId,
      createdAt: m.createdAt,
      startedAt: m.startedAt,
      status: ok ? 'succeeded' : 'failed',
      finishedAt: nowIso(),
      exitCode: lastCode,
      overallGrade: grade,
      summary,
    };
    store.writeMeta(runId, finished);
  } catch (e) {
    // Any exception (allowlist, spawn, store I/O) ends the run as 'error'.
    const msg = e instanceof Error ? e.message : String(e);
    store.appendEvent(runId, {
      type: 'run_error',
      ts: nowIso(),
      message: msg,
      overallGrade: 'RED',
    });
    const cur = store.readMeta(runId);
    if (!cur) return;
    const errMeta: RunMeta = {
      id: runId,
      runbookId: cur.runbookId,
      createdAt: cur.createdAt,
      startedAt: cur.startedAt,
      status: 'error',
      finishedAt: nowIso(),
      exitCode: null,
      overallGrade: 'RED',
      summary: msg,
    };
    store.writeMeta(runId, errMeta);
  }
}
/**
 * Validates, persists, and queues a runbook run.
 *
 * Synchronous phase (may throw to the caller): spec lookup, allowlist check,
 * meta/inputs persistence, 'run_queued' event. Execution then proceeds in the
 * background; its outcome is recorded via the job store, not returned here.
 *
 * @returns the new run's id for polling or event streaming.
 * @throws Error when the runbook id is unknown or not allowlisted.
 */
export function queueRun(
  runbookId: string,
  rawInputs: Record<string, unknown>,
): { runId: string } {
  const spec = loadRunbookSpec(runbookId);
  if (!spec) {
    throw new Error(`Unknown runbook: ${runbookId}`);
  }
  assertRunbookAllowlisted(spec, getProjectRoot());
  const store = getJobStore();
  const runId = store.createRunId();
  // Persist only the redacted copy; raw values flow to the child env/args.
  const redacted = redactInputs(spec, rawInputs);
  ensureDirSync(getRunDataDir());
  store.writeMeta(runId, {
    id: runId,
    runbookId,
    status: 'queued',
    createdAt: nowIso(),
    exitCode: null,
    overallGrade: null,
  });
  store.writeInputs(runId, redacted);
  store.appendEvent(runId, {
    type: 'run_queued',
    ts: nowIso(),
    runbookId,
    specVersion: '2.0',
  });
  // Fire-and-forget, but with a rejection handler: executeRunbook can throw
  // before its internal try/catch (e.g. meta read failure), and an unhandled
  // rejection would otherwise crash the Node process.
  void executeRunbook(runId, spec, rawInputs).catch((err) => {
    console.error(`[mission-control] run ${runId} failed outside executor handling:`, err);
  });
  return { runId };
}

View File

@@ -0,0 +1,111 @@
import { EventEmitter } from 'node:events';
import fs from 'node:fs';
import path from 'node:path';
import crypto from 'node:crypto';
import type { RunEvent } from '@/lib/run-events';
import { ensureDirSync, getRunDataDir } from '@/lib/paths';
/** Lifecycle states for a run; 'succeeded' | 'failed' | 'error' are terminal. */
export type RunStatus = 'queued' | 'running' | 'succeeded' | 'failed' | 'error';
/** Per-run metadata persisted as meta.json in the run's directory. */
export type RunMeta = {
  id: string;
  runbookId: string;
  status: RunStatus;
  createdAt: string; // ISO-8601 timestamps throughout
  startedAt?: string;
  finishedAt?: string;
  exitCode: number | null;
  overallGrade: 'GREEN' | 'AMBER' | 'RED' | null;
  summary?: string;
};
// Singleton slot on globalThis so dev-server hot reloads reuse one store.
const g = globalThis as unknown as {
  __missionControlJobStore?: JobStore;
};
/**
 * Filesystem-backed store for run artifacts plus an in-process event fanout.
 * Layout per run (under getRunDataDir()/<runId>/): meta.json, events.jsonl,
 * stdout.log, stderr.log, inputs.redacted.json, touchpoints.final.json,
 * compliance.json. Events are emitted both on a per-run bus and globally.
 */
class JobStore extends EventEmitter {
  private readonly runBuses = new Map<string, EventEmitter>();

  /** Per-run event bus, created lazily on first access. */
  getRunBus(runId: string): EventEmitter {
    const existing = this.runBuses.get(runId);
    if (existing) return existing;
    const bus = new EventEmitter();
    this.runBuses.set(runId, bus);
    return bus;
  }

  createRunId(): string {
    return crypto.randomUUID();
  }

  runDir(runId: string): string {
    return path.join(getRunDataDir(), runId);
  }

  /** Serializes `data` as pretty-printed JSON into <runDir>/<fileName>. */
  private writeJsonFile(runId: string, fileName: string, data: unknown): void {
    const dir = this.runDir(runId);
    ensureDirSync(dir);
    fs.writeFileSync(path.join(dir, fileName), JSON.stringify(data, null, 2), 'utf8');
  }

  /** Writes raw text into <runDir>/<fileName>. */
  private writeTextFile(runId: string, fileName: string, content: string): void {
    const dir = this.runDir(runId);
    ensureDirSync(dir);
    fs.writeFileSync(path.join(dir, fileName), content, 'utf8');
  }

  /** Appends one JSONL event and fans it out on the run-scoped and global buses. */
  appendEvent(runId: string, event: RunEvent): void {
    const dir = this.runDir(runId);
    ensureDirSync(dir);
    fs.appendFileSync(path.join(dir, 'events.jsonl'), JSON.stringify(event) + '\n', 'utf8');
    this.getRunBus(runId).emit('event', event);
    this.emit('event', { runId, event });
  }

  writeMeta(runId: string, meta: RunMeta): void {
    this.writeJsonFile(runId, 'meta.json', meta);
  }

  readMeta(runId: string): RunMeta | null {
    const metaPath = path.join(this.runDir(runId), 'meta.json');
    if (!fs.existsSync(metaPath)) return null;
    return JSON.parse(fs.readFileSync(metaPath, 'utf8')) as RunMeta;
  }

  readEvents(runId: string): RunEvent[] {
    const eventsPath = path.join(this.runDir(runId), 'events.jsonl');
    if (!fs.existsSync(eventsPath)) return [];
    return fs
      .readFileSync(eventsPath, 'utf8')
      .split('\n')
      .filter(Boolean)
      .map((line) => JSON.parse(line) as RunEvent);
  }

  writeStdoutSnapshot(runId: string, content: string): void {
    this.writeTextFile(runId, 'stdout.log', content);
  }

  writeStderrSnapshot(runId: string, content: string): void {
    this.writeTextFile(runId, 'stderr.log', content);
  }

  writeInputs(runId: string, inputs: Record<string, unknown>): void {
    this.writeJsonFile(runId, 'inputs.redacted.json', inputs);
  }

  writeTouchpoints(runId: string, touchpoints: unknown[]): void {
    this.writeJsonFile(runId, 'touchpoints.final.json', touchpoints);
  }

  writeCompliance(runId: string, rows: unknown[]): void {
    this.writeJsonFile(runId, 'compliance.json', rows);
  }
}
/** Process-wide singleton job store (stashed on globalThis so hot reloads reuse it). */
export function getJobStore(): JobStore {
  g.__missionControlJobStore ??= new JobStore();
  return g.__missionControlJobStore;
}

View File

@@ -0,0 +1,109 @@
/** One tile in the Mission Control launchpad. */
export type LaunchDestination = {
  id: string; // stable slug used as a React key / identifier
  title: string;
  description: string;
  href: string; // default URL, overridable via NEXT_PUBLIC_* env vars
  kind: 'external' | 'docs'; // external service vs. documentation link
};
/** Reads an env var, falling back when unset or empty; safe where `process` is undefined. */
function envUrl(key: string, fallback: string): string {
  if (typeof process === 'undefined') return fallback;
  const value = process.env[key];
  if (value && value.length > 0) return value;
  return fallback;
}
/**
 * Returns the launchpad tiles in display order. Every href has a hard-coded
 * default and a NEXT_PUBLIC_* env override so deployments can repoint links
 * without a code change. Built fresh per call so env changes are picked up.
 */
export function getLaunchDestinations(): LaunchDestination[] {
  return [
    // --- external services ---
    {
      id: 'helper-site',
      title: 'Proxmox helper scripts site',
      description: 'Browse community Proxmox helper scripts and metadata (run separately on port 3000).',
      href: envUrl('NEXT_PUBLIC_HELPER_SCRIPTS_URL', 'http://localhost:3000'),
      kind: 'external',
    },
    {
      id: 'explorer',
      title: 'Chain 138 explorer',
      description: 'Block explorer UI when deployed (set URL for your environment).',
      href: envUrl('NEXT_PUBLIC_EXPLORER_URL', 'https://explorer.d-bis.org'),
      kind: 'external',
    },
    {
      id: 'phoenix-deploy-api',
      title: 'Phoenix Deploy API',
      description:
        'Gitea webhooks, deploy stub, Proxmox VE rail, health summary. Run locally: cd phoenix-deploy-api && npm run dev (default port 4001).',
      href: envUrl('NEXT_PUBLIC_PHOENIX_DEPLOY_API_URL', 'http://localhost:4001/health'),
      kind: 'external',
    },
    // --- documentation links ---
    {
      id: 'docs-master',
      title: 'Documentation index',
      description: 'Master documentation index in this repository.',
      href: envUrl('NEXT_PUBLIC_DOCS_MASTER_URL', 'https://gitea.d-bis.org/d-bis/proxmox/src/branch/main/docs/MASTER_INDEX.md'),
      kind: 'docs',
    },
    {
      id: 'operational-runbooks',
      title: 'Operational runbooks (markdown)',
      description: 'Canonical operational runbook index for deep procedures.',
      href: envUrl(
        'NEXT_PUBLIC_OPERATIONAL_RUNBOOKS_URL',
        'https://gitea.d-bis.org/d-bis/proxmox/src/branch/main/docs/03-deployment/OPERATIONAL_RUNBOOKS.md',
      ),
      kind: 'docs',
    },
    {
      id: 'meta-testnet-2138-runbook',
      title: 'Meta testnet (2138) runbook',
      description: 'DeFi Oracle Meta testnet deployment and wallet flow (Wagmi, MetaMask JSON, RPC).',
      href: envUrl(
        'NEXT_PUBLIC_TESTNET_2138_RUNBOOK_URL',
        'https://gitea.d-bis.org/d-bis/proxmox/src/branch/main/docs/testnet/DEFI_ORACLE_META_TESTNET_2138_RUNBOOK.md',
      ),
      kind: 'docs',
    },
    {
      id: 'aggregator-route-matrix',
      title: 'Aggregator route matrix',
      description: 'JSON matrix of NPM / aggregator routes for operators (also available as CSV in config/).',
      href: envUrl(
        'NEXT_PUBLIC_ROUTE_MATRIX_URL',
        'https://gitea.d-bis.org/d-bis/proxmox/src/branch/main/config/aggregator-route-matrix.json',
      ),
      kind: 'docs',
    },
    {
      id: 'e2e-endpoints-doc',
      title: 'E2E endpoint inventory',
      description:
        'Canonical list of public web, API, and RPC endpoints used by verify-end-to-end-routing.sh (profiles, evidence, Gitea Actions pointers).',
      href: envUrl(
        'NEXT_PUBLIC_E2E_ENDPOINTS_DOC_URL',
        'https://gitea.d-bis.org/d-bis/proxmox/src/branch/main/docs/04-configuration/E2E_ENDPOINTS_LIST.md',
      ),
      kind: 'docs',
    },
    // --- hosted sites ---
    {
      id: 'public-docs-site',
      title: 'Public docs (docs.d-bis.org)',
      description: 'Hosted documentation when routed via NPM (E2E verifier target).',
      href: envUrl('NEXT_PUBLIC_DOCS_SITE_URL', 'https://docs.d-bis.org'),
      kind: 'external',
    },
    {
      id: 'gitea',
      title: 'Gitea',
      description: 'Git hosting and Actions; pair with scripts/verify/print-gitea-actions-urls.sh for job URLs.',
      href: envUrl('NEXT_PUBLIC_GITEA_URL', 'https://gitea.d-bis.org'),
      kind: 'external',
    },
    {
      id: 'chain138-dapp',
      title: 'Chain 138 DApp',
      description: 'Bridge / dapp frontend (dapp.d-bis.org) when deployed.',
      href: envUrl('NEXT_PUBLIC_CHAIN138_DAPP_URL', 'https://dapp.d-bis.org'),
      kind: 'external',
    },
  ];
}

View File

@@ -0,0 +1,16 @@
import { describe, it, expect } from 'vitest';
import { loadAllRunbookSpecs } from '@/lib/load-specs';
// Catalog smoke test: hand-written JSON specs and doc-manifest runbooks must
// merge into one catalog, and every entry must have at least one execution step.
describe('runbook catalog', () => {
  it('merges hand-written specs with all doc-derived runbooks', () => {
    const all = loadAllRunbookSpecs();
    // 58 is the minimum expected after the doc-manifest regeneration.
    expect(all.length).toBeGreaterThanOrEqual(58);
    const ids = new Set(all.map((s) => s.id));
    expect(ids.has('health-self-check')).toBe(true);
    expect(ids.has('check-chain138-rpc-health')).toBe(true);
    // Doc-derived runbooks are identifiable by their 'doc-' id prefix.
    expect([...ids].some((id) => id.startsWith('doc-'))).toBe(true);
    for (const s of all) {
      expect(s.execution.steps.length).toBeGreaterThan(0);
    }
  });
});

View File

@@ -0,0 +1,82 @@
import fs from 'node:fs';
import path from 'node:path';
import { runbookSpecSchema, type RunbookSpec } from '@/lib/runbook-schema';
import { getMissionControlDir } from '@/lib/paths';
/** Absolute path of the hand-written runbook spec JSON directory. */
export function getSpecsDir(): string {
  const mcDir = getMissionControlDir();
  return path.join(mcDir, 'runbooks', 'specs');
}
// Module-level memo: doc-manifest runbooks are parsed once per process.
let docManifestCache: RunbookSpec[] | null = null;
/** Path of the generated doc-manifest.json holding doc-derived runbooks. */
function getDocManifestPath(): string {
  return path.join(getMissionControlDir(), 'runbooks', 'doc-manifest.json');
}
function loadDocManifestRunbooks(): RunbookSpec[] {
if (docManifestCache) return docManifestCache;
const p = getDocManifestPath();
if (!fs.existsSync(p)) {
docManifestCache = [];
return docManifestCache;
}
const raw = JSON.parse(fs.readFileSync(p, 'utf8')) as { runbooks?: unknown[] };
const list = raw.runbooks ?? [];
const out: RunbookSpec[] = [];
for (const item of list) {
try {
out.push(runbookSpecSchema.parse(item));
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
console.error('[mission-control] Skipping invalid doc-manifest entry:', msg);
}
}
docManifestCache = out;
return out;
}
function loadJsonSpecsFromDir(): RunbookSpec[] {
const dir = getSpecsDir();
if (!fs.existsSync(dir)) {
return [];
}
const files = fs.readdirSync(dir).filter((f) => f.endsWith('.json'));
const out: RunbookSpec[] = [];
for (const file of files) {
const fp = path.join(dir, file);
try {
const raw = fs.readFileSync(fp, 'utf8');
const parsed = JSON.parse(raw) as unknown;
out.push(runbookSpecSchema.parse(parsed));
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
console.error(`[mission-control] Skipping invalid runbook spec ${file}:`, msg);
}
}
return out;
}
/**
 * Merges doc-derived and hand-written specs into one catalog, sorted by
 * title. Hand-written JSON specs win on id collisions (inserted second).
 */
export function loadAllRunbookSpecs(): RunbookSpec[] {
  const merged = new Map<string, RunbookSpec>();
  for (const spec of [...loadDocManifestRunbooks(), ...loadJsonSpecsFromDir()]) {
    merged.set(spec.id, spec);
  }
  return [...merged.values()].sort((a, b) => a.title.localeCompare(b.title));
}
/**
 * Loads one runbook spec by id: hand-written JSON file first, then the doc
 * manifest. Returns null for unknown ids or specs that fail validation.
 *
 * The id is interpolated into a filesystem path, so it is restricted to a
 * slug alphabet — this blocks path traversal (e.g. id = '../../secret')
 * from callers that pass user-supplied ids (queueRun does).
 */
export function loadRunbookSpec(id: string): RunbookSpec | null {
  // Reject anything that could escape the specs directory when joined.
  // Existing ids are kebab-case slugs, so this is backward compatible.
  if (!/^[A-Za-z0-9_-]+$/.test(id)) {
    return null;
  }
  const fp = path.join(getSpecsDir(), `${id}.json`);
  if (fs.existsSync(fp)) {
    try {
      const raw = fs.readFileSync(fp, 'utf8');
      return runbookSpecSchema.parse(JSON.parse(raw));
    } catch {
      // Unparseable or schema-invalid file: treat as not found.
      return null;
    }
  }
  const fromDoc = loadDocManifestRunbooks().find((r) => r.id === id);
  return fromDoc ?? null;
}

View File

@@ -0,0 +1,46 @@
import fs from 'node:fs';
import path from 'node:path';
/**
 * Monorepo root (parent of mission-control/). Resolution order:
 * 1. MISSION_CONTROL_PROJECT_ROOT env var, when set and existing on disk;
 * 2. the nearest of cwd/.. or cwd/../.. containing pnpm-workspace.yaml;
 * 3. cwd itself as a last resort.
 */
export function getProjectRoot(): string {
  const override = process.env.MISSION_CONTROL_PROJECT_ROOT?.trim();
  if (override) {
    const overrideAbs = path.resolve(override);
    if (fs.existsSync(overrideAbs)) {
      return overrideAbs;
    }
    console.error(
      `[mission-control] MISSION_CONTROL_PROJECT_ROOT does not exist (${overrideAbs}); falling back to auto-detect.`,
    );
  }
  const cwd = process.cwd();
  // Look one and two levels up for the workspace marker file.
  for (const candidate of [path.resolve(cwd, '..'), path.resolve(cwd, '../..')]) {
    if (fs.existsSync(path.join(candidate, 'pnpm-workspace.yaml'))) {
      return candidate;
    }
  }
  return cwd;
}
/** Absolute path of the mission-control workspace directory under the repo root. */
export function getMissionControlDir(): string {
  const root = getProjectRoot();
  return path.join(root, 'mission-control');
}
/** Directory where per-run artifacts (meta, events, logs) are persisted. */
export function getRunDataDir(): string {
  return path.join(getMissionControlDir(), '.data', 'runs');
}
/** Creates `dir` (and any missing parents); no-op when it already exists. */
export function ensureDirSync(dir: string): void {
  fs.mkdirSync(dir, { recursive: true });
}

View File

@@ -0,0 +1,19 @@
import type { RunbookSpec } from '@/lib/runbook-schema';
/**
 * Returns a copy of `inputs` with every field the spec marks `sensitive`
 * replaced by the literal string '[REDACTED]'. Non-sensitive values pass
 * through unchanged; the original object is not mutated.
 */
export function redactInputs(
  spec: RunbookSpec,
  inputs: Record<string, unknown>,
): Record<string, unknown> {
  const sensitiveNames = new Set(
    spec.inputs.filter((field) => field.sensitive).map((field) => field.name),
  );
  return Object.fromEntries(
    Object.entries(inputs).map(([key, value]) => [
      key,
      sensitiveNames.has(key) ? '[REDACTED]' : value,
    ]),
  );
}

View File

@@ -0,0 +1,80 @@
import { z } from 'zod';
/**
 * Discriminated union of every event written to a run's events.jsonl (and
 * fanned out over the job-store buses). `type` is the discriminator; every
 * variant carries an ISO-8601 `ts` timestamp.
 */
export const runEventSchema = z.discriminatedUnion('type', [
  // Run accepted and persisted; execution not yet started.
  z.object({
    type: z.literal('run_queued'),
    ts: z.string(),
    runbookId: z.string(),
    specVersion: z.string(),
  }),
  // Spec passed the execution-time allowlist check.
  z.object({
    type: z.literal('allowlist_verified'),
    ts: z.string(),
    detail: z.string(),
  }),
  // Per-step lifecycle: started, then finished (exitCode null on spawn error).
  z.object({
    type: z.literal('step_started'),
    ts: z.string(),
    stepIndex: z.number(),
    stepTotal: z.number(),
    scriptRelative: z.string(),
    program: z.string(),
    argsRedacted: z.array(z.string()),
  }),
  z.object({
    type: z.literal('step_finished'),
    ts: z.string(),
    stepIndex: z.number(),
    stepTotal: z.number(),
    exitCode: z.number().nullable(),
    error: z.string().optional(),
  }),
  z.object({
    type: z.literal('process_spawned'),
    ts: z.string(),
    program: z.string(),
    argsRedacted: z.array(z.string()),
    cwd: z.string(),
  }),
  // Streamed output, one event per non-empty line.
  z.object({
    type: z.literal('stdout_line'),
    ts: z.string(),
    line: z.string(),
  }),
  z.object({
    type: z.literal('stderr_line'),
    ts: z.string(),
    line: z.string(),
  }),
  // Final per-touchpoint verdict emitted after the last step completes.
  z.object({
    type: z.literal('touchpoint_result'),
    ts: z.string(),
    touchpointId: z.string(),
    status: z.enum(['PASS', 'FAIL', 'PENDING']),
    evidence: z.string(),
    grade: z.enum(['GREEN', 'AMBER', 'RED']),
  }),
  // One assertion per derived compliance-control row.
  z.object({
    type: z.literal('compliance_assertion'),
    ts: z.string(),
    controlId: z.string(),
    framework: z.string(),
    satisfied: z.boolean(),
    evidence: z.string(),
  }),
  // Terminal events: normal completion vs. error abort (always RED).
  z.object({
    type: z.literal('run_finished'),
    ts: z.string(),
    exitCode: z.number().nullable(),
    overallGrade: z.enum(['GREEN', 'AMBER', 'RED']),
    summary: z.string(),
  }),
  z.object({
    type: z.literal('run_error'),
    ts: z.string(),
    message: z.string(),
    overallGrade: z.literal('RED'),
  }),
]);
export type RunEvent = z.infer<typeof runEventSchema>;

View File

@@ -0,0 +1,61 @@
import { z } from 'zod';
// One user-facing input field on a runbook's launch form.
const inputFieldSchema = z.object({
  name: z.string(),
  label: z.string(),
  type: z.enum(['boolean', 'string', 'number', 'select']),
  help: z.string(),
  example: z.string().optional(),
  sensitive: z.boolean().optional(), // sensitive values are redacted before persistence
  default: z.union([z.string(), z.boolean(), z.number()]).optional(),
  options: z.array(z.object({ value: z.string(), label: z.string() })).optional(),
});
// A verifiable outcome checked against process exit code or stdout patterns.
const touchpointSchema = z.object({
  id: z.string(),
  label: z.string(),
  description: z.string(),
  passCondition: z.enum(['exit_zero', 'stdout_contains', 'stdout_not_contains']),
  pattern: z.string().optional(), // required in practice for the stdout_* conditions
});
// A human-readable procedure step shown in the runbook UI (not executed).
const stepSchema = z.object({
  title: z.string(),
  plainText: z.string(),
  technicalNote: z.string().optional(),
  example: z.string().optional(),
});
// An executable step: an allowlisted script run under bash or node.
export const executionStepSchema = z.object({
  interpreter: z.enum(['bash', 'node']),
  scriptRelative: z.string(), // repo-relative; validated against the path allowlist
  args: z.array(z.string()).optional(), // may contain {{input}} templates
  supportsDryRun: z.boolean().optional(), // practice mode appends --dry-run when true
  /** When input name is truthy, append these args (after template substitution). */
  whenInputTrue: z.record(z.string(), z.array(z.string())).optional(),
});
export const executionSchema = z.object({
  steps: z.array(executionStepSchema).min(1), // every runbook must be executable
});
// Full runbook document: narrative docs plus the executable definition.
export const runbookSpecSchema = z.object({
  id: z.string(),
  title: z.string(),
  summary: z.string(),
  whyItMatters: z.string(),
  audienceHelp: z.string(),
  docPath: z.string(),
  prerequisites: z.array(z.string()),
  steps: z.array(stepSchema),
  inputs: z.array(inputFieldSchema),
  touchpoints: z.array(touchpointSchema),
  complianceFramework: z.string(),
  execution: executionSchema,
  executionNote: z.string().optional(),
});
export type RunbookSpec = z.infer<typeof runbookSpecSchema>;
export type RunbookInputField = z.infer<typeof inputFieldSchema>;
export type RunbookTouchpoint = z.infer<typeof touchpointSchema>;
export type ExecutionStep = z.infer<typeof executionStepSchema>;

View File

@@ -0,0 +1,80 @@
import type { RunbookSpec } from '@/lib/runbook-schema';
/** Mutable evaluation state for one declared touchpoint during a run. */
export type TouchpointState = {
  id: string;
  label: string;
  status: 'PENDING' | 'PASS' | 'FAIL'; // PENDING until evidence or finalize()
  evidence: string; // human-readable justification for the current status
  grade: 'GREEN' | 'AMBER' | 'RED';
};
/** Seeds each declared touchpoint as PENDING/AMBER until evidence arrives. */
const initialStates = (spec: RunbookSpec): Map<string, TouchpointState> => {
  const seeded = new Map<string, TouchpointState>();
  for (const tp of spec.touchpoints) {
    seeded.set(tp.id, {
      id: tp.id,
      label: tp.label,
      status: 'PENDING',
      evidence: 'Awaiting execution output',
      grade: 'AMBER',
    });
  }
  return seeded;
};
/**
 * Stateful evaluator for a spec's touchpoints.
 * - ingestStdout: matches stdout_contains / stdout_not_contains patterns
 *   against the cumulative stdout as output streams in.
 * - finalize: settles exit_zero touchpoints from the exit code and fails
 *   anything still PENDING.
 * - getStates: snapshot of current states (for live UI).
 */
export function createTouchpointTracker(spec: RunbookSpec) {
  const states = initialStates(spec);
  let stdoutSoFar = '';

  function ingestStdout(chunk: string): void {
    stdoutSoFar += chunk;
    for (const tp of spec.touchpoints) {
      const state = states.get(tp.id);
      if (!state || state.status !== 'PENDING') continue;
      // exit_zero touchpoints are settled only in finalize().
      if (tp.passCondition === 'exit_zero' || !tp.pattern) continue;
      if (!stdoutSoFar.includes(tp.pattern)) continue;
      if (tp.passCondition === 'stdout_contains') {
        state.status = 'PASS';
        state.evidence = `Stdout contained required pattern: ${JSON.stringify(tp.pattern)}`;
        state.grade = 'GREEN';
      } else if (tp.passCondition === 'stdout_not_contains') {
        state.status = 'FAIL';
        state.evidence = `Stdout contained forbidden pattern: ${JSON.stringify(tp.pattern)}`;
        state.grade = 'RED';
      }
    }
  }

  function finalize(exitCode: number | null): TouchpointState[] {
    for (const tp of spec.touchpoints) {
      const state = states.get(tp.id);
      if (!state) continue;
      if (tp.passCondition === 'exit_zero') {
        if (exitCode === 0) {
          state.status = 'PASS';
          state.evidence = 'Process exited with code 0';
          state.grade = 'GREEN';
        } else {
          state.status = 'FAIL';
          state.evidence = `Process exited with code ${exitCode ?? 'null'}`;
          state.grade = 'RED';
        }
      } else if (state.status === 'PENDING') {
        // Required pattern never appeared before the process ended.
        state.status = 'FAIL';
        state.evidence = 'Expected pattern or condition not observed before process end';
        state.grade = 'RED';
      }
    }
    return [...states.values()];
  }

  return { ingestStdout, finalize, getStates: () => [...states.values()] };
}

View File

@@ -0,0 +1,45 @@
import type { Config } from 'tailwindcss';
const config: Config = {
  // Scan all source files for class names (Next.js src/ layout).
  content: ['./src/**/*.{js,ts,jsx,tsx,mdx}'],
  theme: {
    extend: {
      colors: {
        // "TARDIS" palette: deep blues with a cyan glow and amber accent.
        tardis: {
          deep: '#001a33',
          panel: '#003b6f',
          bright: '#0066cc',
          glow: '#00b4d8',
          amber: '#ffb703',
          paper: '#f8fafc',
        },
      },
      fontFamily: {
        // System font stacks only — no webfont downloads.
        display: [
          'system-ui',
          'Segoe UI',
          'Roboto',
          'Helvetica Neue',
          'Arial',
          'sans-serif',
        ],
        mono: [
          'ui-monospace',
          'SFMono-Regular',
          'Menlo',
          'Monaco',
          'Consolas',
          'Liberation Mono',
          'monospace',
        ],
      },
      boxShadow: {
        // Outer cyan glow for highlighted panels; subtle inner top edge.
        tardis: '0 0 24px rgba(0, 180, 216, 0.35)',
        panel: 'inset 0 1px 0 rgba(255, 255, 255, 0.06)',
      },
    },
  },
  plugins: [],
};
export default config;

View File

@@ -0,0 +1,21 @@
{
"compilerOptions": {
"target": "ES2022",
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"plugins": [{ "name": "next" }],
"paths": { "@/*": ["./src/*"] }
},
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
"exclude": ["node_modules"]
}

View File

@@ -0,0 +1,22 @@
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { defineConfig } from 'vitest/config';
// This config file lives in the mission-control package; the monorepo root
// is its parent directory.
const root = fileURLToPath(new URL('.', import.meta.url));
const monorepoRoot = path.resolve(root, '..');
export default defineConfig({
  test: {
    environment: 'node',
    include: ['src/**/*.test.ts'],
    // Executor integration tests spawn real scripts; allow up to 60s each.
    testTimeout: 60_000,
    env: {
      // Pin the project root so path resolution doesn't depend on test cwd.
      MISSION_CONTROL_PROJECT_ROOT: monorepoRoot,
    },
  },
  resolve: {
    alias: {
      // Mirror tsconfig's '@/*' -> './src/*' path mapping.
      '@': path.resolve(root, 'src'),
    },
  },
});

View File

@@ -24,7 +24,11 @@
"test": "pnpm --filter mcp-proxmox-server test || echo \"No tests specified\"",
"test:basic": "cd mcp-proxmox && node test-basic-tools.js",
"test:workflows": "cd mcp-proxmox && node test-workflows.js",
"verify:ws-chain138": "node scripts/verify-ws-rpc-chain138.mjs"
"verify:ws-chain138": "node scripts/verify-ws-rpc-chain138.mjs",
"mission-control:dev": "pnpm --filter mission-control dev",
"mission-control:build": "pnpm --filter mission-control build",
"mission-control:start": "pnpm --filter mission-control start",
"mission-control:test": "pnpm --filter mission-control test"
},
"keywords": [
"proxmox",

20806
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@@ -2,13 +2,13 @@ packages:
- alltra-lifi-settlement
- multi-chain-execution
- mcp-proxmox
- mcp-omada
# mcp-omada / omada-api: submodule remote unavailable (ARROMIS/omada-api); omit until cloned
- mcp-unifi
- mcp-site-manager
- omada-api
- unifi-api
- site-manager-api
- ProxmoxVE/frontend
- mission-control
- rpc-translator-138
- smom-dbis-138/frontend-dapp
- smom-dbis-138/services/token-aggregation

View File

@@ -0,0 +1,130 @@
#!/usr/bin/env bash
# Deploy Phoenix Deploy API to the dev VM (canonical: VMID 5700, IP_DEV_VM).
# Installs to /opt/phoenix-deploy-api and enables systemd (see phoenix-deploy-api/scripts/install-systemd.sh).
#
# Layout on the workstation: repo root must contain phoenix-deploy-api/ and
# config/public-sector-program-manifest.json (copied into /opt by install-systemd).
# Include phoenix-deploy-api/.env in your tree before deploy (not committed); it is packed if present.
#
# Requires: LAN SSH to the Proxmox node that hosts VMID 5700 (see get_host_for_vmid in
# scripts/lib/load-project-env.sh). Default PVE: r630-04 for 5700 (per get_host_for_vmid live placement).
#
# Usage:
# ./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --dry-run
# ./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --apply
# ./scripts/deployment/deploy-phoenix-deploy-api-to-dev-vm.sh --apply --start-ct # pct start 5700 on PVE if stopped
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load project env (IP map, get_host_for_vmid, overrides); hard-fail if the lib is missing.
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || {
echo "ERROR: load-project-env.sh not found at ${PROJECT_ROOT}/scripts/lib/load-project-env.sh" >&2
exit 1
}
# All knobs overridable via PHOENIX_DEPLOY_* env vars; defaults come from the env lib.
VMID="${PHOENIX_DEPLOY_DEV_VM_VMID:-5700}"
PVE_HOST="${PHOENIX_DEPLOY_PVE_HOST:-$(get_host_for_vmid "$VMID")}"
PVE_USER="${PHOENIX_DEPLOY_PVE_USER:-root}"
SSH_OPTS="${PHOENIX_DEPLOY_SSH_OPTS:--o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new}"
IP_DEV_VM="${IP_DEV_VM:-192.168.11.59}"
# Dry-run is the default; later flags win when both --apply and --dry-run are given.
DRY_RUN=1
START_CT=0
for a in "$@"; do
if [[ "$a" == "--apply" ]]; then DRY_RUN=0; fi
if [[ "$a" == "--dry-run" ]]; then DRY_RUN=1; fi
if [[ "$a" == "--start-ct" ]]; then START_CT=1; fi
done
# Missing manifest is a warning only: install-systemd.sh on the CT warns again at install time.
MANIFEST="${PROJECT_ROOT}/config/public-sector-program-manifest.json"
if [[ ! -f "$MANIFEST" ]]; then
echo "WARN: missing ${MANIFEST} — install on CT will warn; add file or fix path." >&2
fi
if [[ ! -d "${PROJECT_ROOT}/phoenix-deploy-api" ]]; then
echo "ERROR: ${PROJECT_ROOT}/phoenix-deploy-api not found." >&2
exit 1
fi
echo "=============================================="
echo "Phoenix Deploy API → dev VM"
echo " VMID: $VMID (expected IP: $IP_DEV_VM)"
echo " PVE host: ${PVE_USER}@${PVE_HOST}"
echo " Dry-run: $DRY_RUN"
echo "=============================================="
# Remote staging locations on the PVE node / inside the CT.
REMOTE_TAR="/tmp/pda-deploy-bundle.tar.gz"
STAGE="/tmp/proxmox-pda-stage"
# remote_block: run a command on the PVE node over SSH.
# SSH_OPTS is intentionally unquoted so multiple options word-split into separate args.
remote_block() {
# shellcheck disable=SC2029
ssh $SSH_OPTS "${PVE_USER}@${PVE_HOST}" "$@"
}
if [[ "$DRY_RUN" -eq 1 ]]; then
echo "Dry-run only. Would:"
echo " 1. tar czf (phoenix-deploy-api + config/public-sector-program-manifest.json)"
echo " 2. scp bundle → ${PVE_USER}@${PVE_HOST}:${REMOTE_TAR}"
echo " 3. pct push ${VMID} … /root/pda-deploy.tar.gz && pct exec ${VMID} -- install-systemd.sh"
echo " 4. curl http://${IP_DEV_VM}:4001/health"
echo "Optional: --start-ct starts VMID ${VMID} on ${PVE_HOST} if it is stopped (pct must target a running CT)."
echo "Re-run with --apply to execute."
exit 0
fi
# Build the deploy bundle locally; the temp tarball is removed on exit via trap.
TMP_TAR="$(mktemp /tmp/pda-deploy-XXXXXX.tar.gz)"
cleanup() { rm -f "$TMP_TAR"; }
trap cleanup EXIT
cd "$PROJECT_ROOT"
tar czf "$TMP_TAR" phoenix-deploy-api config/public-sector-program-manifest.json
# ensure_ct_running: probe CT reachability via `pct exec`; with --start-ct attempt one
# `pct start` then re-probe, otherwise fail with operator guidance.
ensure_ct_running() {
if remote_block "pct exec ${VMID} -- true 2>/dev/null"; then
return 0
fi
echo "CT ${VMID} is not running or not reachable (pct exec failed)." >&2
if [[ "$START_CT" -eq 1 ]]; then
echo "Starting CT ${VMID} on ${PVE_HOST} (--start-ct)..."
if ! remote_block "pct start ${VMID}"; then
echo "pct start failed — CT may not exist on this node. Find VMID: ssh ${PVE_USER}@${PVE_HOST} \"pct list\"" >&2
echo "Override: PHOENIX_DEPLOY_PVE_HOST=<node-ip> PHOENIX_DEPLOY_DEV_VM_VMID=<id> $0 --apply" >&2
exit 1
fi
sleep 3
if ! remote_block "pct exec ${VMID} -- true 2>/dev/null"; then
echo "CT ${VMID} still not reachable after start." >&2
exit 1
fi
return 0
fi
echo "Start the dev VM first, e.g. on ${PVE_HOST}: pct start ${VMID}" >&2
echo "Or re-run with --apply --start-ct (scoped to this script only)." >&2
exit 1
}
# run_deploy: upload bundle to the PVE node, push+extract+install inside the CT
# (heredoc is unquoted on purpose: VMID/REMOTE_TAR/STAGE expand locally before being
# sent to the remote shell), then a best-effort LAN health check against port 4001.
run_deploy() {
ensure_ct_running
echo "[1/3] Upload bundle to PVE..."
scp $SSH_OPTS "$TMP_TAR" "${PVE_USER}@${PVE_HOST}:${REMOTE_TAR}"
echo "[2/3] pct push → CT ${VMID}, extract, install-systemd..."
remote_block bash -s <<REMOTE_EOF
set -euo pipefail
pct push ${VMID} ${REMOTE_TAR} /root/pda-deploy.tar.gz
pct exec ${VMID} -- bash -c "set -euo pipefail; rm -rf ${STAGE}; mkdir -p ${STAGE}; tar xzf /root/pda-deploy.tar.gz -C ${STAGE}; cd ${STAGE} && bash phoenix-deploy-api/scripts/install-systemd.sh; rm -f /root/pda-deploy.tar.gz"
rm -f ${REMOTE_TAR}
REMOTE_EOF
echo "[3/3] Health check on dev VM (LAN)..."
if command -v curl >/dev/null 2>&1; then
curl -sS --max-time 10 -o /dev/null -w " http://${IP_DEV_VM}:4001/health → HTTP %{http_code}\n" "http://${IP_DEV_VM}:4001/health" || echo " (curl failed — check firewall or service)"
else
echo " (curl not installed locally; skip health check)"
fi
}
run_deploy
echo "Done."

View File

@@ -5,7 +5,7 @@
# Usage: source "${SCRIPT_DIR}/lib/load-project-env.sh"
#
# Env precedence (first wins): 1) .env 2) config/ip-addresses.conf 3) smom-dbis-138/.env 4) dbis_core config
# Version: 2026-01-31
# Version: 2026-04-13 (get_host_for_vmid: explicit Sankofa 7800-7806 on r630-01)
[[ -n "${PROJECT_ROOT:-}" ]] || PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
export PROJECT_ROOT
@@ -13,31 +13,71 @@ export PROJECT_ROOT
# err_exit: print message and exit (use when load-project-env is sourced)
err_exit() { echo "ERROR: $1" >&2; exit 1; }
# Dotenv / shell env snippets may use ${OTHER_VAR} without :- defaults; callers may use set -u.
# Source an optional shell config snippet with `nounset` temporarily relaxed.
# Snippets may expand ${VARS} without :- defaults while callers run `set -u`;
# the caller's nounset state is restored afterwards. Missing files are a no-op.
_lpr_source_relaxed() {
  local snippet="$1"
  [[ -f "$snippet" ]] || return 0
  local nounset_was_on=0
  if [[ -o nounset ]]; then
    nounset_was_on=1
  fi
  set +u
  # shellcheck disable=SC1090
  source "$snippet" 2>/dev/null || true
  if [[ "$nounset_was_on" -eq 1 ]]; then
    set -u
  else
    set +u
  fi
}
# Source a dotenv-style file with auto-export (`set -a`) enabled and `nounset`
# temporarily relaxed, restoring the caller's nounset state afterwards.
# Missing files are silently skipped so optional env layers cost nothing.
_lpr_dotenv_source() {
  local dotenv_file="$1"
  [[ -f "$dotenv_file" ]] || return 0
  local restore_nounset=0
  if [[ -o nounset ]]; then
    restore_nounset=1
  fi
  set +u
  set -a
  # shellcheck disable=SC1090
  source "$dotenv_file" 2>/dev/null || true
  set +a
  if [[ "$restore_nounset" -eq 1 ]]; then
    set -u
  else
    set +u
  fi
}
# Path validation
[[ -d "$PROJECT_ROOT" ]] || err_exit "PROJECT_ROOT not a directory: $PROJECT_ROOT"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] || echo "WARN: config/ip-addresses.conf not found; using defaults" >&2
# 1. Root .env (Cloudflare, Proxmox, etc.)
[[ -f "${PROJECT_ROOT}/.env" ]] && set -a && source "${PROJECT_ROOT}/.env" 2>/dev/null && set +a
_lpr_dotenv_source "${PROJECT_ROOT}/.env"
# 2. IP/config from centralized config
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && _lpr_source_relaxed "${PROJECT_ROOT}/config/ip-addresses.conf" || true
# 3. smom-dbis-138 .env (PRIVATE_KEY, bridge addrs, RPC) — PRIVATE_KEY is read from this dotenv when not set
[[ -f "${PROJECT_ROOT}/smom-dbis-138/.env" ]] && set -a && source "${PROJECT_ROOT}/smom-dbis-138/.env" 2>/dev/null && set +a
_lpr_dotenv_source "${PROJECT_ROOT}/smom-dbis-138/.env"
# 3b. Secure secrets (PRIVATE_KEY) — when not set, try ~/.secure-secrets/private-keys.env
[[ -z "${PRIVATE_KEY:-}" ]] && [[ -f "${HOME}/.secure-secrets/private-keys.env" ]] && set -a && source "${HOME}/.secure-secrets/private-keys.env" 2>/dev/null && set +a
[[ -z "${PRIVATE_KEY:-}" ]] && [[ -f "${HOME}/.secure-secrets/private-keys.env" ]] && _lpr_dotenv_source "${HOME}/.secure-secrets/private-keys.env"
# 3c. Dedicated keeper key (KEEPER_PRIVATE_KEY) — separate signer for keeper/upkeep flows
KEEPER_SECRET_FILE="${KEEPER_SECRET_FILE:-${HOME}/.secure-secrets/chain138-keeper.env}"
[[ -z "${KEEPER_PRIVATE_KEY:-}" ]] && [[ -f "${KEEPER_SECRET_FILE}" ]] && _lpr_dotenv_source "${KEEPER_SECRET_FILE}"
# 4. dbis_core config if present
[[ -f "${PROJECT_ROOT}/dbis_core/config/dbis-core-proxmox.conf" ]] && source "${PROJECT_ROOT}/dbis_core/config/dbis-core-proxmox.conf" 2>/dev/null || true
[[ -f "${PROJECT_ROOT}/dbis_core/config/dbis-core-proxmox.conf" ]] && _lpr_source_relaxed "${PROJECT_ROOT}/dbis_core/config/dbis-core-proxmox.conf" || true
# 4b. Strip trailing CR/LF from RPC URL vars (editor mistakes; breaks cast/curl)
for _lpr_k in RPC_URL_138 RPC_URL CHAIN138_RPC CHAIN138_RPC_URL ETHEREUM_MAINNET_RPC \
for _lpr_k in RPC_URL_138 RPC_URL CHAIN138_RPC CHAIN138_RPC_URL CHAIN_138_RPC_URL \
TOKEN_AGG_CHAIN138_RPC_URL TOKEN_AGGREGATION_CHAIN138_RPC_URL TOKEN_AGGREGATION_PMM_RPC_URL \
ETHEREUM_MAINNET_RPC \
XDC_PARENTNET_URL PARENTNET_URL SUBNET_URL XDC_ZERO_PEER_RPC_URL \
RPC_URL_138_PUBLIC GNOSIS_MAINNET_RPC GNOSIS_RPC CRONOS_RPC_URL CRONOS_RPC \
CELO_MAINNET_RPC CELO_RPC WEMIX_RPC WEMIX_MAINNET_RPC BSC_RPC_URL \
POLYGON_MAINNET_RPC BASE_MAINNET_RPC OPTIMISM_MAINNET_RPC ARBITRUM_MAINNET_RPC \
AVALANCHE_RPC_URL AVALANCHE_RPC; do
AVALANCHE_RPC_URL AVALANCHE_RPC CHAIN_651940_RPC_URL FLASH_PROVIDER_RPC_URL; do
_lpr_v="${!_lpr_k:-}"
[[ -z "$_lpr_v" ]] && continue
_lpr_v="${_lpr_v%$'\r'}"
@@ -46,13 +86,32 @@ for _lpr_k in RPC_URL_138 RPC_URL CHAIN138_RPC CHAIN138_RPC_URL ETHEREUM_MAINNET
done
unset _lpr_k _lpr_v 2>/dev/null || true
# 4c. economics-toolkit gas-quote overrides: ECONOMICS_GAS_RPC_<chainId> (same strip)
for _lpr_id in 1 10 25 56 100 137 138 1111 8453 42161 42220 43114 651940; do
_lpr_k="ECONOMICS_GAS_RPC_${_lpr_id}"
_lpr_v="${!_lpr_k:-}"
[[ -z "$_lpr_v" ]] && continue
_lpr_v="${_lpr_v%$'\r'}"
_lpr_v="${_lpr_v%$'\n'}"
export "$_lpr_k=$_lpr_v"
done
unset _lpr_k _lpr_v _lpr_id 2>/dev/null || true
# 5. Contract addresses from master JSON (config/smart-contracts-master.json) when not set by .env
[[ -f "${PROJECT_ROOT}/scripts/lib/load-contract-addresses.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-contract-addresses.sh" 2>/dev/null || true
# Ensure hosts have fallbacks (from config or defaults)
PROXMOX_HOST_R630_01="${PROXMOX_HOST_R630_01:-${PROXMOX_R630_01:-192.168.11.11}}"
PROXMOX_HOST_R630_02="${PROXMOX_HOST_R630_02:-${PROXMOX_R630_02:-192.168.11.12}}"
PROXMOX_HOST_R630_03="${PROXMOX_HOST_R630_03:-${PROXMOX_R630_03:-192.168.11.13}}"
PROXMOX_HOST_R630_04="${PROXMOX_HOST_R630_04:-${PROXMOX_R630_04:-192.168.11.14}}"
PROXMOX_HOST_ML110="${PROXMOX_HOST_ML110:-${PROXMOX_ML110:-192.168.11.10}}"
# Proxmox hypervisor FQDNs (canonical: <host>.sankofa.nexus — align with LAN DNS)
export PROXMOX_FQDN_ML110="${PROXMOX_FQDN_ML110:-ml110.sankofa.nexus}"
export PROXMOX_FQDN_R630_01="${PROXMOX_FQDN_R630_01:-r630-01.sankofa.nexus}"
export PROXMOX_FQDN_R630_02="${PROXMOX_FQDN_R630_02:-r630-02.sankofa.nexus}"
export PROXMOX_FQDN_R630_03="${PROXMOX_FQDN_R630_03:-r630-03.sankofa.nexus}"
export PROXMOX_FQDN_R630_04="${PROXMOX_FQDN_R630_04:-r630-04.sankofa.nexus}"
# Derived vars (from config; fallbacks for missing config)
export RPC_CORE_1="${RPC_CORE_1:-192.168.11.211}"
@@ -69,14 +128,20 @@ export SMOM_DIR="${SMOM_DBIS_138_DIR:-${PROJECT_ROOT}/smom-dbis-138}"
export DBIS_CORE_DIR="${DBIS_CORE_DIR:-${PROJECT_ROOT}/dbis_core}"
# VMID -> Proxmox host (for pct/qm operations)
# Covers: DBIS (101xx), RPC (2101, 2201, 2301, etc.), Blockscout (5000), CCIP (5400-5476), NPMplus (10233, 10234)
# Covers: DBIS (101xx), RPC (2101-2103, 2201, 2301, etc.), Blockscout (5000), CCIP (5400-5476), NPMplus (10233, 10234), Sankofa stack (7800-7806)
# Live placement (2026-04-09): validators 1003/1004, sentries 1503-1510, and RPCs 2102, 2301, 2304, 2400, 2402, 2403 on r630-03;
# RPCs 2201, 2303, 2305-2308, 2401 on r630-02; 2101 + 2103 remain on r630-01 — see ALL_VMIDS_ENDPOINTS.md
# Dev VM (GitOps / Gitea sidecar target): VMID 5700 on r630-04 (verified cluster API 2026-04-17)
get_host_for_vmid() {
local vmid="$1"
case "$vmid" in
10130|10150|10151|106|107|108|10000|10001|10020|10100|10101|10120|10233|10235) echo "${PROXMOX_HOST_R630_01}";;
2101) echo "${PROXMOX_HOST_R630_01}";;
5000|5700|7810|2201|2303|2401|6200|6201|10234|10237|5800|5801) echo "${PROXMOX_HOST_R630_02}";;
2301|2400|1504|2503|2504|2505) echo "${PROXMOX_HOST_ML110}";;
7800|7801|7802|7803|7804|7805|7806) echo "${PROXMOX_HOST_R630_01}";;
10130|10150|10151|106|107|108|10000|10001|10020|10100|10101|10120|10203|10233|10235) echo "${PROXMOX_HOST_R630_01}";;
1000|1001|1002|1500|1501|1502|2101|2103) echo "${PROXMOX_HOST_R630_01}";;
1003|1004|1503|1504|1505|1506|1507|1508|1509|1510|2102|2301|2304|2400|2402|2403) echo "${PROXMOX_HOST_R630_03}";;
5700) echo "${PROXMOX_HOST_R630_04}";;
5000|7810|2201|2303|2305|2306|2307|2308|2401|6200|6201|6202|6203|6204|6205|10234|10237|5800|5801) echo "${PROXMOX_HOST_R630_02}";;
2420|2430|2440|2460|2470|2480) echo "${PROXMOX_HOST_R630_01}";;
5400|5401|5402|5403|5410|5411|5412|5413|5414|5415|5416|5417|5418|5419|5420|5421|5422|5423|5424|5425|5440|5441|5442|5443|5444|5445|5446|5447|5448|5449|5450|5451|5452|5453|5454|5455|5470|5471|5472|5473|5474|5475|5476) echo "${PROXMOX_HOST_R630_02}";;
*) echo "${PROXMOX_HOST_R630_01:-${PROXMOX_R630_02}}";;
esac

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env node
/**
 * Minimal health probe for Mission Control (allowlisted path under scripts/).
 * Emits a fixed sentinel line; callers treat its presence as "healthy".
 */
const HEALTH_SENTINEL = 'MISSION_CONTROL_HEALTH_OK';
console.log(HEALTH_SENTINEL);

View File

@@ -1,247 +0,0 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
function global:deactivate ([switch]$NonDestructive) {
# Revert to original values
# The prior prompt:
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
}
# The prior PYTHONHOME:
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
}
# The prior PATH:
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
}
# Just remove the VIRTUAL_ENV altogether:
if (Test-Path -Path Env:VIRTUAL_ENV) {
Remove-Item -Path env:VIRTUAL_ENV
}
# Just remove VIRTUAL_ENV_PROMPT altogether.
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
}
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
}
# Leave deactivate function in the global namespace if requested:
if (-not $NonDestructive) {
Remove-Item -Path function:deactivate
}
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.
If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.
.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
[String]
$ConfigDir
) {
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
# An empty map will be returned if no config file is found.
$pyvenvConfig = @{ }
if ($pyvenvConfigPath) {
Write-Verbose "File exists, parse `key = value` lines"
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
$pyvenvConfigContent | ForEach-Object {
$keyval = $PSItem -split "\s*=\s*", 2
if ($keyval[0] -and $keyval[1]) {
$val = $keyval[1]
# Remove extraneous quotations around a string value.
if ("'""".Contains($val.Substring(0, 1))) {
$val = $val.Substring(1, $val.Length - 2)
}
$pyvenvConfig[$keyval[0]] = $val
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
}
}
}
return $pyvenvConfig
}
<# Begin Activate script --------------------------------------------------- #>
# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
Write-Verbose "VenvDir=$VenvDir"
}
# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
$Prompt = $pyvenvCfg['prompt'];
}
else {
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
$Prompt = Split-Path -Path $venvDir -Leaf
}
}
Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"
# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
Write-Verbose "Setting prompt to '$Prompt'"
# Set the prompt to include the env name
# Make sure _OLD_VIRTUAL_PROMPT is global
function global:_OLD_VIRTUAL_PROMPT { "" }
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
function global:prompt {
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
_OLD_VIRTUAL_PROMPT
}
$env:VIRTUAL_ENV_PROMPT = $Prompt
}
# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
Remove-Item -Path Env:PYTHONHOME
}
# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

View File

@@ -1,70 +0,0 @@
# This file must be used with "source bin/activate" *from bash*
# You cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
PATH="${_OLD_VIRTUAL_PATH:-}"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# Call hash to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
hash -r 2> /dev/null
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
PS1="${_OLD_VIRTUAL_PS1:-}"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
unset VIRTUAL_ENV_PROMPT
if [ ! "${1:-}" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
# on Windows, a path can contain colons and backslashes and has to be converted:
if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
# transform D:\path\to\venv to /d/path/to/venv on MSYS
# and to /cygdrive/d/path/to/venv on Cygwin
export VIRTUAL_ENV=$(cygpath /home/intlc/projects/proxmox/venv)
else
# use the path as-is
export VIRTUAL_ENV=/home/intlc/projects/proxmox/venv
fi
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/"bin":$PATH"
export PATH
# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
unset PYTHONHOME
fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
PS1='(venv) '"${PS1:-}"
export PS1
VIRTUAL_ENV_PROMPT='(venv) '
export VIRTUAL_ENV_PROMPT
fi
# Call hash to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
hash -r 2> /dev/null

View File

@@ -1,27 +0,0 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
# Unset irrelevant variables.
deactivate nondestructive
setenv VIRTUAL_ENV /home/intlc/projects/proxmox/venv
set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
set _OLD_VIRTUAL_PROMPT="$prompt"
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
set prompt = '(venv) '"$prompt"
setenv VIRTUAL_ENV_PROMPT '(venv) '
endif
alias pydoc python -m pydoc
rehash

View File

@@ -1,69 +0,0 @@
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
# (https://fishshell.com/). You cannot run it directly.
function deactivate -d "Exit virtual environment and return to normal shell environment"
# reset old environment variables
if test -n "$_OLD_VIRTUAL_PATH"
set -gx PATH $_OLD_VIRTUAL_PATH
set -e _OLD_VIRTUAL_PATH
end
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
set -e _OLD_VIRTUAL_PYTHONHOME
end
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
set -e _OLD_FISH_PROMPT_OVERRIDE
# prevents error when using nested fish instances (Issue #93858)
if functions -q _old_fish_prompt
functions -e fish_prompt
functions -c _old_fish_prompt fish_prompt
functions -e _old_fish_prompt
end
end
set -e VIRTUAL_ENV
set -e VIRTUAL_ENV_PROMPT
if test "$argv[1]" != "nondestructive"
# Self-destruct!
functions -e deactivate
end
end
# Unset irrelevant variables.
deactivate nondestructive
set -gx VIRTUAL_ENV /home/intlc/projects/proxmox/venv
set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/"bin $PATH
# Unset PYTHONHOME if set.
if set -q PYTHONHOME
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
set -e PYTHONHOME
end
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
# fish uses a function instead of an env var to generate the prompt.
# Save the current fish_prompt function as the function _old_fish_prompt.
functions -c fish_prompt _old_fish_prompt
# With the original prompt function renamed, we can override with our own.
function fish_prompt
# Save the return status of the last command.
set -l old_status $status
# Output the venv prompt; color taken from the blue of the Python logo.
printf "%s%s%s" (set_color 4B8BBE) '(venv) ' (set_color normal)
# Restore the return status of the previous command.
echo "exit $old_status" | .
# Output the original/"old" prompt.
_old_fish_prompt
end
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
set -gx VIRTUAL_ENV_PROMPT '(venv) '
end

View File

@@ -1,8 +0,0 @@
#!/home/intlc/projects/proxmox/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

View File

@@ -1,8 +0,0 @@
#!/home/intlc/projects/proxmox/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli import cli_detect
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli_detect())

View File

@@ -1,8 +0,0 @@
#!/home/intlc/projects/proxmox/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

View File

@@ -1,8 +0,0 @@
#!/home/intlc/projects/proxmox/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

View File

@@ -1,8 +0,0 @@
#!/home/intlc/projects/proxmox/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

View File

@@ -1 +0,0 @@
python3

View File

@@ -1 +0,0 @@
/usr/bin/python3

View File

@@ -1 +0,0 @@
python3

View File

@@ -1,20 +0,0 @@
Copyright 2010 Jason Kirtland
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,60 +0,0 @@
Metadata-Version: 2.3
Name: blinker
Version: 1.9.0
Summary: Fast, simple object-to-object and broadcast signaling
Author: Jason Kirtland
Maintainer-email: Pallets Ecosystem <contact@palletsprojects.com>
Requires-Python: >=3.9
Description-Content-Type: text/markdown
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Typing :: Typed
Project-URL: Chat, https://discord.gg/pallets
Project-URL: Documentation, https://blinker.readthedocs.io
Project-URL: Source, https://github.com/pallets-eco/blinker/
# Blinker
Blinker provides a fast dispatching system that allows any number of
interested parties to subscribe to events, or "signals".
## Pallets Community Ecosystem
> [!IMPORTANT]\
> This project is part of the Pallets Community Ecosystem. Pallets is the open
> source organization that maintains Flask; Pallets-Eco enables community
> maintenance of related projects. If you are interested in helping maintain
> this project, please reach out on [the Pallets Discord server][discord].
>
> [discord]: https://discord.gg/pallets
## Example
Signal receivers can subscribe to specific senders or receive signals
sent by any sender.
```pycon
>>> from blinker import signal
>>> started = signal('round-started')
>>> def each(round):
... print(f"Round {round}")
...
>>> started.connect(each)
>>> def round_two(round):
... print("This is round two.")
...
>>> started.connect(round_two, sender=2)
>>> for round in range(1, 4):
... started.send(round)
...
Round 1!
Round 2!
This is round two.
Round 3!
```

View File

@@ -1,12 +0,0 @@
blinker-1.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
blinker-1.9.0.dist-info/LICENSE.txt,sha256=nrc6HzhZekqhcCXSrhvjg5Ykx5XphdTw6Xac4p-spGc,1054
blinker-1.9.0.dist-info/METADATA,sha256=uIRiM8wjjbHkCtbCyTvctU37IAZk0kEe5kxAld1dvzA,1633
blinker-1.9.0.dist-info/RECORD,,
blinker-1.9.0.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
blinker/__init__.py,sha256=I2EdZqpy4LyjX17Hn1yzJGWCjeLaVaPzsMgHkLfj_cQ,317
blinker/__pycache__/__init__.cpython-312.pyc,,
blinker/__pycache__/_utilities.cpython-312.pyc,,
blinker/__pycache__/base.cpython-312.pyc,,
blinker/_utilities.py,sha256=0J7eeXXTUx0Ivf8asfpx0ycVkp0Eqfqnj117x2mYX9E,1675
blinker/base.py,sha256=QpDuvXXcwJF49lUBcH5BiST46Rz9wSG7VW_p7N_027M,19132
blinker/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

View File

@@ -1,4 +0,0 @@
Wheel-Version: 1.0
Generator: flit 3.10.1
Root-Is-Purelib: true
Tag: py3-none-any

View File

@@ -1,17 +0,0 @@
from __future__ import annotations
from .base import ANY
from .base import default_namespace
from .base import NamedSignal
from .base import Namespace
from .base import Signal
from .base import signal
__all__ = [
"ANY",
"default_namespace",
"NamedSignal",
"Namespace",
"Signal",
"signal",
]

View File

@@ -1,64 +0,0 @@
from __future__ import annotations
import collections.abc as c
import inspect
import typing as t
from weakref import ref
from weakref import WeakMethod
T = t.TypeVar("T")
class Symbol:
    """An interned named constant: constructing a ``Symbol`` twice with the
    same name yields the very same object, which makes it a nicer sentinel
    than a bare ``object()``.

    >>> Symbol('foo') is Symbol('foo')
    True
    >>> Symbol('foo')
    foo
    """

    # Registry of every symbol created so far, keyed by name.
    symbols: t.ClassVar[dict[str, Symbol]] = {}

    def __new__(cls, name: str) -> Symbol:
        existing = cls.symbols.get(name)

        if existing is not None:
            return existing

        instance = super().__new__(cls)
        cls.symbols[name] = instance
        return instance

    def __init__(self, name: str) -> None:
        self.name = name

    def __repr__(self) -> str:
        return self.name

    def __getnewargs__(self) -> tuple[t.Any, ...]:
        # Lets pickle rebuild the symbol through __new__, preserving interning.
        return (self.name,)
def make_id(obj: object) -> c.Hashable:
    """Produce a stable, hashable key identifying a receiver or sender, to be
    used as a dict key or in a set.

    Bound methods get a composite key, value types (``str``/``int``) are used
    directly, and anything else is keyed by its ``id``.
    """
    if inspect.ismethod(obj):
        func = obj.__func__
        target = obj.__self__
        # id() of a bound method changes between attribute accesses; the pair
        # of the underlying function id and instance id is stable.
        return id(func), id(target)

    if isinstance(obj, (str, int)):
        # Equal values always compare equal and hash identically, even though
        # their id may differ, so the value itself is the key.
        return obj

    # Everything else is assumed to be identity-keyed (same instance reused).
    return id(obj)
def make_ref(obj: T, callback: c.Callable[[ref[T]], None] | None = None) -> ref[T]:
    """Create a weak reference to ``obj``, selecting :class:`WeakMethod` for
    bound methods (a plain ``ref`` to a bound method would die immediately).
    """
    factory = WeakMethod if inspect.ismethod(obj) else ref
    return factory(obj, callback)  # type: ignore[arg-type, return-value]

View File

@@ -1,512 +0,0 @@
from __future__ import annotations
import collections.abc as c
import sys
import typing as t
import weakref
from collections import defaultdict
from contextlib import contextmanager
from functools import cached_property
from inspect import iscoroutinefunction
from ._utilities import make_id
from ._utilities import make_ref
from ._utilities import Symbol
# Type variable for receiver callables, so connect() can return its argument
# unchanged (decorator-friendly).
F = t.TypeVar("F", bound=c.Callable[..., t.Any])

ANY = Symbol("ANY")
"""Symbol for "any sender"."""

# Internal sentinel used in the sender/receiver bookkeeping maps for ANY.
ANY_ID = 0
class Signal:
    """A notification emitter.

    :param doc: The docstring for the signal.
    """

    ANY = ANY
    """An alias for the :data:`~blinker.ANY` sender symbol."""

    set_class: type[set[t.Any]] = set
    """The set class to use for tracking connected receivers and senders.

    Python's ``set`` is unordered. If receivers must be dispatched in the order
    they were connected, an ordered set implementation can be used.

    .. versionadded:: 1.7
    """

    # The two meta-signals below are cached_property rather than plain class
    # attributes so they are only instantiated when first accessed.

    @cached_property
    def receiver_connected(self) -> Signal:
        """Emitted at the end of each :meth:`connect` call.

        The signal sender is the signal instance, and the :meth:`connect`
        arguments are passed through: ``receiver``, ``sender``, and ``weak``.

        .. versionadded:: 1.2
        """
        return Signal(doc="Emitted after a receiver connects.")

    @cached_property
    def receiver_disconnected(self) -> Signal:
        """Emitted at the end of each :meth:`disconnect` call.

        The sender is the signal instance, and the :meth:`disconnect` arguments
        are passed through: ``receiver`` and ``sender``.

        This signal is emitted **only** when :meth:`disconnect` is called
        explicitly. This signal cannot be emitted by an automatic disconnect
        when a weakly referenced receiver or sender goes out of scope, as the
        instance is no longer available to be used as the sender for this
        signal.

        An alternative approach is available by subscribing to
        :attr:`receiver_connected` and setting up a custom weakref cleanup
        callback on weak receivers and senders.

        .. versionadded:: 1.2
        """
        return Signal(doc="Emitted after a receiver disconnects.")

    def __init__(self, doc: str | None = None) -> None:
        if doc:
            self.__doc__ = doc

        self.receivers: dict[
            t.Any, weakref.ref[c.Callable[..., t.Any]] | c.Callable[..., t.Any]
        ] = {}
        """The map of connected receivers. Useful to quickly check if any
        receivers are connected to the signal: ``if s.receivers:``. The
        structure and data is not part of the public API, but checking its
        boolean value is.
        """

        # When True, send()/send_async() short-circuit and call no receivers.
        self.is_muted: bool = False
        # receiver id -> set of sender ids that receiver is connected to.
        self._by_receiver: dict[t.Any, set[t.Any]] = defaultdict(self.set_class)
        # sender id -> set of receiver ids connected to that sender.
        self._by_sender: dict[t.Any, set[t.Any]] = defaultdict(self.set_class)
        # sender id -> weakref whose callback prunes bookkeeping when the
        # sender is garbage collected.
        self._weak_senders: dict[t.Any, weakref.ref[t.Any]] = {}

    def connect(self, receiver: F, sender: t.Any = ANY, weak: bool = True) -> F:
        """Connect ``receiver`` to be called when the signal is sent by
        ``sender``.

        :param receiver: The callable to call when :meth:`send` is called with
            the given ``sender``, passing ``sender`` as a positional argument
            along with any extra keyword arguments.
        :param sender: Any object or :data:`ANY`. ``receiver`` will only be
            called when :meth:`send` is called with this sender. If ``ANY``, the
            receiver will be called for any sender. A receiver may be connected
            to multiple senders by calling :meth:`connect` multiple times.
        :param weak: Track the receiver with a :mod:`weakref`. The receiver will
            be automatically disconnected when it is garbage collected. When
            connecting a receiver defined within a function, set to ``False``,
            otherwise it will be disconnected when the function scope ends.
        """
        receiver_id = make_id(receiver)
        sender_id = ANY_ID if sender is ANY else make_id(sender)

        if weak:
            # Weak tracking: the stored ref's callback disconnects the
            # receiver automatically when it is collected.
            self.receivers[receiver_id] = make_ref(
                receiver, self._make_cleanup_receiver(receiver_id)
            )
        else:
            self.receivers[receiver_id] = receiver

        self._by_sender[sender_id].add(receiver_id)
        self._by_receiver[receiver_id].add(sender_id)

        if sender is not ANY and sender_id not in self._weak_senders:
            # store a cleanup for weakref-able senders
            try:
                self._weak_senders[sender_id] = make_ref(
                    sender, self._make_cleanup_sender(sender_id)
                )
            except TypeError:
                # Sender type does not support weak references; skip cleanup.
                pass

        # Checking self.__dict__ avoids instantiating the cached meta-signal
        # just to find out whether anyone is listening to it.
        if "receiver_connected" in self.__dict__ and self.receiver_connected.receivers:
            try:
                self.receiver_connected.send(
                    self, receiver=receiver, sender=sender, weak=weak
                )
            except TypeError:
                # TODO no explanation or test for this
                self.disconnect(receiver, sender)
                raise

        return receiver

    def connect_via(self, sender: t.Any, weak: bool = False) -> c.Callable[[F], F]:
        """Connect the decorated function to be called when the signal is sent
        by ``sender``.

        The decorated function will be called when :meth:`send` is called with
        the given ``sender``, passing ``sender`` as a positional argument along
        with any extra keyword arguments.

        :param sender: Any object or :data:`ANY`. ``receiver`` will only be
            called when :meth:`send` is called with this sender. If ``ANY``, the
            receiver will be called for any sender. A receiver may be connected
            to multiple senders by calling :meth:`connect` multiple times.
        :param weak: Track the receiver with a :mod:`weakref`. The receiver will
            be automatically disconnected when it is garbage collected. When
            connecting a receiver defined within a function, set to ``False``,
            otherwise it will be disconnected when the function scope ends.

        .. versionadded:: 1.1
        """

        def decorator(fn: F) -> F:
            self.connect(fn, sender, weak)
            return fn

        return decorator

    @contextmanager
    def connected_to(
        self, receiver: c.Callable[..., t.Any], sender: t.Any = ANY
    ) -> c.Generator[None, None, None]:
        """A context manager that temporarily connects ``receiver`` to the
        signal while a ``with`` block executes. When the block exits, the
        receiver is disconnected. Useful for tests.

        :param receiver: The callable to call when :meth:`send` is called with
            the given ``sender``, passing ``sender`` as a positional argument
            along with any extra keyword arguments.
        :param sender: Any object or :data:`ANY`. ``receiver`` will only be
            called when :meth:`send` is called with this sender. If ``ANY``, the
            receiver will be called for any sender.

        .. versionadded:: 1.1
        """
        # weak=False: the caller holds the receiver for the block's duration,
        # and disconnect() below must find it by the same id.
        self.connect(receiver, sender=sender, weak=False)

        try:
            yield None
        finally:
            self.disconnect(receiver)

    @contextmanager
    def muted(self) -> c.Generator[None, None, None]:
        """A context manager that temporarily disables the signal. No receivers
        will be called if the signal is sent, until the ``with`` block exits.
        Useful for tests.
        """
        self.is_muted = True

        try:
            yield None
        finally:
            self.is_muted = False

    def send(
        self,
        sender: t.Any | None = None,
        /,
        *,
        _async_wrapper: c.Callable[
            [c.Callable[..., c.Coroutine[t.Any, t.Any, t.Any]]], c.Callable[..., t.Any]
        ]
        | None = None,
        **kwargs: t.Any,
    ) -> list[tuple[c.Callable[..., t.Any], t.Any]]:
        """Call all receivers that are connected to the given ``sender``
        or :data:`ANY`. Each receiver is called with ``sender`` as a positional
        argument along with any extra keyword arguments. Return a list of
        ``(receiver, return value)`` tuples.

        The order receivers are called is undefined, but can be influenced by
        setting :attr:`set_class`.

        If a receiver raises an exception, that exception will propagate up.
        This makes debugging straightforward, with an assumption that correctly
        implemented receivers will not raise.

        :param sender: Call receivers connected to this sender, in addition to
            those connected to :data:`ANY`.
        :param _async_wrapper: Will be called on any receivers that are async
            coroutines to turn them into sync callables. For example, could run
            the receiver with an event loop.
        :param kwargs: Extra keyword arguments to pass to each receiver.

        .. versionchanged:: 1.7
            Added the ``_async_wrapper`` argument.
        """
        if self.is_muted:
            return []

        results = []

        for receiver in self.receivers_for(sender):
            if iscoroutinefunction(receiver):
                if _async_wrapper is None:
                    raise RuntimeError("Cannot send to a coroutine function.")

                result = _async_wrapper(receiver)(sender, **kwargs)
            else:
                result = receiver(sender, **kwargs)

            results.append((receiver, result))

        return results

    async def send_async(
        self,
        sender: t.Any | None = None,
        /,
        *,
        _sync_wrapper: c.Callable[
            [c.Callable[..., t.Any]], c.Callable[..., c.Coroutine[t.Any, t.Any, t.Any]]
        ]
        | None = None,
        **kwargs: t.Any,
    ) -> list[tuple[c.Callable[..., t.Any], t.Any]]:
        """Await all receivers that are connected to the given ``sender``
        or :data:`ANY`. Each receiver is called with ``sender`` as a positional
        argument along with any extra keyword arguments. Return a list of
        ``(receiver, return value)`` tuples.

        The order receivers are called is undefined, but can be influenced by
        setting :attr:`set_class`.

        If a receiver raises an exception, that exception will propagate up.
        This makes debugging straightforward, with an assumption that correctly
        implemented receivers will not raise.

        :param sender: Call receivers connected to this sender, in addition to
            those connected to :data:`ANY`.
        :param _sync_wrapper: Will be called on any receivers that are sync
            callables to turn them into async coroutines. For example,
            could call the receiver in a thread.
        :param kwargs: Extra keyword arguments to pass to each receiver.

        .. versionadded:: 1.7
        """
        if self.is_muted:
            return []

        results = []

        for receiver in self.receivers_for(sender):
            if not iscoroutinefunction(receiver):
                if _sync_wrapper is None:
                    raise RuntimeError("Cannot send to a non-coroutine function.")

                result = await _sync_wrapper(receiver)(sender, **kwargs)
            else:
                result = await receiver(sender, **kwargs)

            results.append((receiver, result))

        return results

    def has_receivers_for(self, sender: t.Any) -> bool:
        """Check if there is at least one receiver that will be called with the
        given ``sender``. A receiver connected to :data:`ANY` will always be
        called, regardless of sender. Does not check if weakly referenced
        receivers are still live. See :meth:`receivers_for` for a stronger
        search.

        :param sender: Check for receivers connected to this sender, in addition
            to those connected to :data:`ANY`.
        """
        if not self.receivers:
            return False

        # Any ANY-connected receiver matches every sender.
        if self._by_sender[ANY_ID]:
            return True

        if sender is ANY:
            # ANY-specific receivers were already ruled out above.
            return False

        return make_id(sender) in self._by_sender

    def receivers_for(
        self, sender: t.Any
    ) -> c.Generator[c.Callable[..., t.Any], None, None]:
        """Yield each receiver to be called for ``sender``, in addition to those
        to be called for :data:`ANY`. Weakly referenced receivers that are not
        live will be disconnected and skipped.

        :param sender: Yield receivers connected to this sender, in addition
            to those connected to :data:`ANY`.
        """
        # TODO: test receivers_for(ANY)
        if not self.receivers:
            return

        sender_id = make_id(sender)

        # Snapshot the candidate ids up front; both branches build a new set
        # so disconnecting during iteration cannot mutate what we loop over.
        if sender_id in self._by_sender:
            ids = self._by_sender[ANY_ID] | self._by_sender[sender_id]
        else:
            ids = self._by_sender[ANY_ID].copy()

        for receiver_id in ids:
            receiver = self.receivers.get(receiver_id)

            if receiver is None:
                continue

            if isinstance(receiver, weakref.ref):
                strong = receiver()

                if strong is None:
                    # The weakly referenced receiver was collected; drop all
                    # of its bookkeeping.
                    self._disconnect(receiver_id, ANY_ID)
                    continue

                yield strong
            else:
                yield receiver

    def disconnect(self, receiver: c.Callable[..., t.Any], sender: t.Any = ANY) -> None:
        """Disconnect ``receiver`` from being called when the signal is sent by
        ``sender``.

        :param receiver: A connected receiver callable.
        :param sender: Disconnect from only this sender. By default, disconnect
            from all senders.
        """
        sender_id: c.Hashable

        if sender is ANY:
            sender_id = ANY_ID
        else:
            sender_id = make_id(sender)

        receiver_id = make_id(receiver)
        self._disconnect(receiver_id, sender_id)

        # Same lazy-check pattern as in connect(): don't create the cached
        # meta-signal just to see if it has receivers.
        if (
            "receiver_disconnected" in self.__dict__
            and self.receiver_disconnected.receivers
        ):
            self.receiver_disconnected.send(self, receiver=receiver, sender=sender)

    def _disconnect(self, receiver_id: c.Hashable, sender_id: c.Hashable) -> None:
        # ANY_ID means "disconnect from all senders": remove the receiver from
        # every sender bucket and from the receiver map itself. Otherwise only
        # unlink the single (receiver, sender) pair.
        if sender_id == ANY_ID:
            if self._by_receiver.pop(receiver_id, None) is not None:
                for bucket in self._by_sender.values():
                    bucket.discard(receiver_id)

            self.receivers.pop(receiver_id, None)
        else:
            self._by_sender[sender_id].discard(receiver_id)
            self._by_receiver[receiver_id].discard(sender_id)

    def _make_cleanup_receiver(
        self, receiver_id: c.Hashable
    ) -> c.Callable[[weakref.ref[c.Callable[..., t.Any]]], None]:
        """Create a callback function to disconnect a weakly referenced
        receiver when it is garbage collected.
        """

        def cleanup(ref: weakref.ref[c.Callable[..., t.Any]]) -> None:
            # If the interpreter is shutting down, disconnecting can result in a
            # weird ignored exception. Don't call it in that case.
            if not sys.is_finalizing():
                self._disconnect(receiver_id, ANY_ID)

        return cleanup

    def _make_cleanup_sender(
        self, sender_id: c.Hashable
    ) -> c.Callable[[weakref.ref[t.Any]], None]:
        """Create a callback function to disconnect all receivers for a weakly
        referenced sender when it is garbage collected.
        """
        # ANY has no weakref tracking; connect() never stores one for it.
        assert sender_id != ANY_ID

        def cleanup(ref: weakref.ref[t.Any]) -> None:
            self._weak_senders.pop(sender_id, None)

            for receiver_id in self._by_sender.pop(sender_id, ()):
                self._by_receiver[receiver_id].discard(sender_id)

        return cleanup

    def _cleanup_bookkeeping(self) -> None:
        """Prune unused sender/receiver bookkeeping. Not threadsafe.

        Connecting & disconnecting leaves behind a small amount of bookkeeping
        data. Typical workloads using Blinker, for example in most web apps,
        Flask, CLI scripts, etc., are not adversely affected by this
        bookkeeping.

        With a long-running process performing dynamic signal routing with high
        volume, e.g. connecting to function closures, senders are all unique
        object instances. Doing all of this over and over may cause memory usage
        to grow due to extraneous bookkeeping. (An empty ``set`` for each stale
        sender/receiver pair.)

        This method will prune that bookkeeping away, with the caveat that such
        pruning is not threadsafe. The risk is that cleanup of a fully
        disconnected receiver/sender pair occurs while another thread is
        connecting that same pair. If you are in the highly dynamic, unique
        receiver/sender situation that has led you to this method, that failure
        mode is perhaps not a big deal for you.
        """
        for mapping in (self._by_sender, self._by_receiver):
            # list() snapshots the items so entries can be popped mid-loop.
            for ident, bucket in list(mapping.items()):
                if not bucket:
                    mapping.pop(ident, None)

    def _clear_state(self) -> None:
        """Disconnect all receivers and senders. Useful for tests."""
        self._weak_senders.clear()
        self.receivers.clear()
        self._by_sender.clear()
        self._by_receiver.clear()
class NamedSignal(Signal):
    """A :class:`Signal` that carries a name, matching its key in the
    :class:`Namespace` that owns it. The name does not affect dispatch.

    :param name: The name of the signal within the namespace.
    :param doc: The docstring for the signal.
    """

    def __init__(self, name: str, doc: str | None = None) -> None:
        super().__init__(doc)
        #: The name of this signal.
        self.name: str = name

    def __repr__(self) -> str:
        plain = super().__repr__()
        # Splice the name in just before the default repr's closing ">".
        return f"{plain[:-1]}; {self.name!r}>"
class Namespace(dict[str, NamedSignal]):
    """A dict mapping names to signals."""

    def signal(self, name: str, doc: str | None = None) -> NamedSignal:
        """Look up the :class:`NamedSignal` registered under ``name``,
        creating and storing one on first use. Repeated calls with the same
        name return the same signal object.

        :param name: The name of the signal.
        :param doc: The docstring of the signal.
        """
        try:
            return self[name]
        except KeyError:
            return self.setdefault(name, NamedSignal(name, doc))
class _PNamespaceSignal(t.Protocol):
    # Callable protocol mirroring Namespace.signal's signature; gives the
    # module-level ``signal`` alias a precise type with keyword support.
    def __call__(self, name: str, doc: str | None = None) -> NamedSignal: ...
# Module-level convenience namespace; the ``signal`` alias below registers
# NamedSignal instances here.
default_namespace: Namespace = Namespace()
"""A default :class:`Namespace` for creating named signals. :func:`signal`
creates a :class:`NamedSignal` in this namespace.
"""

signal: _PNamespaceSignal = default_namespace.signal
"""Return a :class:`NamedSignal` in :data:`default_namespace` with the given
``name``, creating it if required. Repeated calls with the same name return the
same signal.
"""

View File

@@ -1,78 +0,0 @@
Metadata-Version: 2.4
Name: certifi
Version: 2025.11.12
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: https://github.com/certifi/python-certifi
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Project-URL: Source, https://github.com/certifi/python-certifi
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Requires-Python: >=3.7
License-File: LICENSE
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: home-page
Dynamic: license
Dynamic: license-file
Dynamic: project-url
Dynamic: requires-python
Dynamic: summary
Certifi: Python SSL Certificates
================================
Certifi provides Mozilla's carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.
Installation
------------
``certifi`` is available on PyPI. Simply install it with ``pip``::
$ pip install certifi
Usage
-----
To reference the installed certificate authority (CA) bundle, you can use the
built-in function::
>>> import certifi
>>> certifi.where()
'/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'
Or from the command line::
$ python -m certifi
/usr/local/lib/python3.7/site-packages/certifi/cacert.pem
Enjoy!
.. _`Requests`: https://requests.readthedocs.io/en/master/
Addition/Removal of Certificates
--------------------------------
Certifi does not support any addition/removal or other modification of the
CA trust store content. This project is intended to provide a reliable and
highly portable root of trust to python deployments. Look to upstream projects
for methods to use alternate trust.

View File

@@ -1,14 +0,0 @@
certifi-2025.11.12.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2025.11.12.dist-info/METADATA,sha256=_JprGu_1lWSdHlruRBKcorXnrfvBDhvX_6KRr8HQbLc,2475
certifi-2025.11.12.dist-info/RECORD,,
certifi-2025.11.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
certifi-2025.11.12.dist-info/licenses/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
certifi-2025.11.12.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=1BRSxNMnZW7CZ2oJtYWLoJgfHfcB9i273exwiPwfjJM,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-312.pyc,,
certifi/__pycache__/__main__.cpython-312.pyc,,
certifi/__pycache__/core.cpython-312.pyc,,
certifi/cacert.pem,sha256=oa1dZD4hxDtb7XTH4IkdzbWPavUcis4eTwINZUqlKhY,283932
certifi/core.py,sha256=XFXycndG5pf37ayeF8N32HUuDafsyhkVMbO4BAPWHa0,3394
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

View File

@@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: setuptools (80.9.0)
Root-Is-Purelib: true
Tag: py3-none-any

View File

@@ -1,20 +0,0 @@
This package contains a modified version of ca-bundle.crt:
ca-bundle.crt -- Bundle of CA Root Certificates
This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.
***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.
***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $

View File

@@ -1,4 +0,0 @@
# Public API: ``where`` returns the filesystem path of the bundled CA file,
# ``contents`` returns its text.
from .core import contents, where

__all__ = ["contents", "where"]

# Calendar version (YYYY.MM.DD) of the bundled Mozilla CA snapshot.
__version__ = "2025.11.12"

View File

@@ -1,12 +0,0 @@
import argparse

from certifi import contents, where

# CLI entry point: print the CA bundle's path by default, or the bundle
# text itself when -c/--contents is given.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-c", "--contents", action="store_true")
options = arg_parser.parse_args()

print(contents() if options.contents else where())

Some files were not shown because too many files have changed in this diff Show More