diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..f9a8c45 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,20 @@ +**/node_modules +**/.git +**/.github +**/dist +**/build +**/.vscode +**/.idea +**/.DS_Store +**/.env +**/.env.local +**/.env.*.local +**/coverage +**/*.log +**/npm-debug.log* +orchestrator/dist +orchestrator/coverage +contracts/cache +contracts/artifacts +terraform +k8s diff --git a/.env.sandbox.example b/.env.sandbox.example new file mode 100644 index 0000000..604143d --- /dev/null +++ b/.env.sandbox.example @@ -0,0 +1,55 @@ +# CurrenciCombo sandbox env — copy to `.env.sandbox` and edit. +# +# cp .env.sandbox.example .env.sandbox +# docker compose --env-file .env.sandbox up -d +# +# `EVENT_SIGNING_SECRET` and `ORCHESTRATOR_API_KEYS` are REQUIRED — +# orchestrator will refuse to boot without them (see PR I boot-time +# env assertions in orchestrator/src/config/env.ts). + +# ---- Postgres ---- +POSTGRES_DB=currencicombo +POSTGRES_USER=currencicombo +POSTGRES_PASSWORD=currencicombo +POSTGRES_PORT=5432 + +# ---- Redis ---- +REDIS_PORT=6379 + +# ---- Orchestrator ---- +ORCHESTRATOR_PORT=8080 +# 32+ random bytes, hex-encoded. Generate with: +# openssl rand -hex 32 +EVENT_SIGNING_SECRET=change-me-to-openssl-rand-hex-32 +# Comma-separated `key:role` pairs; role ∈ {initiator, settler, auditor} +# Generate a key with: +# openssl rand -hex 16 +ORCHESTRATOR_API_KEYS=local-demo-key:initiator,local-settler-key:settler,local-auditor-key:auditor + +# ---- Chain 138 (EXT-CHAIN138-CI-RPC resolved by default) ---- +CHAIN_138_RPC_URL=https://rpc.public-0138.defi-oracle.io +# Published by `contracts/scripts/deploy-notary-registry.ts` once you +# deploy NotaryRegistry.sol. Leave blank to run in mock-anchor mode. +NOTARY_REGISTRY_ADDRESS= +# Funded signer for on-chain anchors. Leave blank to run in mock-anchor +# mode (orchestrator logs "[NotaryChain] mock anchor — reason: notary +# envs not set" when unset). 
+ORCHESTRATOR_PRIVATE_KEY= + +# ---- External blockers (leave blank to run in sandbox/mock mode) ---- +# EXT-DBIS-CORE — flip when dbis_core is deployed +DBIS_CORE_URL= +# EXT-FIN-GATEWAY — flip when real FIN / Alliance Access gateway is provisioned +FIN_SANDBOX_URL= +# cc-identity-core HTTP base URL +CC_IDENTITY_URL= +# cc-compliance-controls matrix JSON URL (optional — embedded v0 is used if blank) +CC_CONTROLS_MATRIX_URL= + +# ---- Portal (Vite) ---- +PORTAL_PORT=3000 +# Baked into the portal bundle at build time. Must be the URL the +# browser uses to reach the orchestrator (usually localhost + the +# published ORCHESTRATOR_PORT). Leave blank to run the portal in its +# built-in demo-fallback mode. +VITE_ORCHESTRATOR_URL=http://localhost:8080 diff --git a/.gitignore b/.gitignore index 204a680..3d00286 100644 --- a/.gitignore +++ b/.gitignore @@ -78,3 +78,4 @@ pnpm-lock.yaml # Misc *.pem *.key +.env.sandbox diff --git a/Dockerfile b/Dockerfile index 29c84cb..3506d8c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,39 +1,45 @@ -# Multi-stage Dockerfile for orchestrator service -FROM node:18-alpine AS builder +# Multi-stage build for the CurrenciCombo portal (Vite + React). +# +# Context MUST be the repo root so the vite build can see src/, public/, +# index.html, etc.: +# +# docker build -t currencicombo/portal:local . +# +# VITE_ORCHESTRATOR_URL is baked at build time (Vite inlines env vars +# prefixed with VITE_). In a sandbox compose, set it to whatever URL +# the browser uses to reach the orchestrator — typically +# http://localhost:8080 if the orchestrator's port is published on the +# host. When unset, the portal runs in its built-in demo-fallback mode +# (see src/services/orchestrator.ts). 
+# ------- build stage ------- +FROM node:20-alpine AS build WORKDIR /app -# Copy package files -COPY orchestrator/package*.json ./ -RUN npm ci +ARG VITE_ORCHESTRATOR_URL="" +ENV VITE_ORCHESTRATOR_URL=${VITE_ORCHESTRATOR_URL} -# Copy source -COPY orchestrator/ ./ +COPY package.json package-lock.json ./ +# vite 7 ships @rolldown/binding-* as platform-matched optional deps, +# so we MUST include optional deps (skipping them breaks `vite build` +# with "Cannot find native binding"). `fsevents` is also optional but +# darwin-only; on linux npm 10 trips EBADPLATFORM on the lockfile +# entry even though the runtime would never load it. `--force` downgrades +# that EBADPLATFORM to a warning while still installing the rolldown +# binding for the current platform. +RUN npm install --include=optional --force --no-audit --no-fund --ignore-scripts + +COPY tsconfig.json tsconfig.app.json tsconfig.node.json vite.config.ts index.html eslint.config.js ./ +COPY public ./public +COPY src ./src -# Build RUN npm run build -# Production stage -FROM node:18-alpine - -WORKDIR /app - -# Copy package files -COPY orchestrator/package*.json ./ - -# Install production dependencies only -RUN npm ci --only=production - -# Copy built files -COPY --from=builder /app/dist ./dist - -# Expose port -EXPOSE 8080 - -# Health check -HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ - CMD node -e "require('http').get('http://localhost:8080/health', (r) => {process.exit(r.statusCode === 200 ? 
0 : 1)})" - -# Start application -CMD ["node", "dist/index.js"] +# ------- runtime stage ------- +FROM nginx:1.27-alpine AS runtime +COPY nginx.conf /etc/nginx/conf.d/default.conf +COPY --from=build /app/dist /usr/share/nginx/html +EXPOSE 80 +HEALTHCHECK --interval=30s --timeout=5s --start-period=5s --retries=3 \ + CMD wget -q --spider http://127.0.0.1/ || exit 1 diff --git a/contracts/hardhat.config.ts b/contracts/hardhat.config.ts index 68fff9f..d9f2bd3 100644 --- a/contracts/hardhat.config.ts +++ b/contracts/hardhat.config.ts @@ -15,6 +15,18 @@ const config: HardhatUserConfig = { hardhat: { chainId: 1337, }, + // Public Chain 138 RPC — resolves proxmox blocker EXT-CHAIN138-CI-RPC. + // Deployer key is only read when a tx is actually sent (e.g. via + // `npx hardhat --network chain138 run scripts/deploy-notary-registry.ts`); + // leaving NOTARY_DEPLOYER_PRIVATE_KEY unset is safe for read-only + // flows like `hardhat console --network chain138`. + chain138: { + url: process.env.NOTARY_RPC_URL || "https://rpc.public-0138.defi-oracle.io", + chainId: 138, + accounts: process.env.NOTARY_DEPLOYER_PRIVATE_KEY + ? [process.env.NOTARY_DEPLOYER_PRIVATE_KEY] + : [], + }, }, paths: { sources: "./", diff --git a/contracts/scripts/deploy-notary-registry.ts b/contracts/scripts/deploy-notary-registry.ts new file mode 100644 index 0000000..efce368 --- /dev/null +++ b/contracts/scripts/deploy-notary-registry.ts @@ -0,0 +1,243 @@ +/** + * Dedicated NotaryRegistry deploy script. + * + * Self-compiles NotaryRegistry.sol + its two interfaces + the OpenZeppelin + * Ownable dependency via solc-js in-process, so it does NOT depend on + * `hardhat compile` (hardhat's source-glob picks up node_modules under + * contracts/ and trips HH1006 on this repo — see E2E helper + * orchestrator/tests/e2e/helpers/compileNotaryRegistry.ts for the same + * trick). 
+ * + * Environment inputs (all read from `process.env`, no CLI args): + * + * NOTARY_RPC_URL RPC endpoint (required unless NOTARY_DRY_RUN=1) + * NOTARY_DEPLOYER_PRIVATE_KEY Hex-encoded funded deployer key (required unless NOTARY_DRY_RUN=1) + * NOTARY_INITIAL_OWNER Address that receives ownership (defaults to deployer) + * NOTARY_DRY_RUN "1" to compile + print calldata shape + skip sending + * + * Usage: + * + * # From contracts/: + * NOTARY_RPC_URL=https://rpc.public-0138.defi-oracle.io \ + * NOTARY_DEPLOYER_PRIVATE_KEY=0x... \ + * npx ts-node scripts/deploy-notary-registry.ts + * + * # Dry run (no RPC contact, no key required — CI smoke test): + * NOTARY_DRY_RUN=1 npx ts-node scripts/deploy-notary-registry.ts + * + * The script prints a machine-readable JSON envelope as its LAST line so + * callers (Makefile, CI, scripts piping into .env.sandbox) can grep the + * address out: + * + * {"contract":"NotaryRegistry","address":"0x...","txHash":"0x...","chainId":138} + */ + +import { readFileSync } from "node:fs"; +import { dirname, join, resolve } from "node:path"; +import { ContractFactory, JsonRpcProvider, Wallet, isAddress } from "ethers"; + +// eslint-disable-next-line @typescript-eslint/no-require-imports, @typescript-eslint/no-var-requires +const solc = require("solc"); + +const CONTRACTS_ROOT = resolve(__dirname, ".."); +const OZ_ROOT = join(CONTRACTS_ROOT, "node_modules", "@openzeppelin"); + +type AbiFragment = Record<string, unknown>; + +interface CompiledArtifact { + abi: AbiFragment[]; + bytecode: string; +} + +interface SolcSource { + content: string; +} + +interface SolcInput { + language: "Solidity"; + sources: Record<string, SolcSource>; + settings: { + optimizer: { enabled: true; runs: number }; + outputSelection: Record<string, Record<string, string[]>>; + }; +} + +interface SolcOutput { + errors?: Array<{ severity: "error" | "warning"; formattedMessage: string }>; + contracts: Record< + string, + Record<string, { abi: AbiFragment[]; evm: { bytecode: { object: string } } }> + >; +} + +function findImports(requestedPath: string): { contents: string } | { error: string } { + if
(requestedPath.startsWith("@openzeppelin/")) { + const rel = requestedPath.replace("@openzeppelin/", ""); + try { + return { contents: readFileSync(join(OZ_ROOT, rel), "utf8") }; + } catch (e) { + return { error: `Could not read ${requestedPath}: ${(e as Error).message}` }; + } + } + try { + return { contents: readFileSync(join(CONTRACTS_ROOT, requestedPath), "utf8") }; + } catch (e) { + return { error: (e as Error).message }; + } +} + +function collectSources(entryPath: string): Record<string, SolcSource> { + const sources: Record<string, SolcSource> = {}; + const stack: string[] = [entryPath]; + const seen = new Set<string>(); + + while (stack.length > 0) { + const cur = stack.pop()!; + if (seen.has(cur)) continue; + seen.add(cur); + + let content: string; + if (cur === entryPath) { + content = readFileSync(join(CONTRACTS_ROOT, "NotaryRegistry.sol"), "utf8"); + } else { + const resolved = findImports(cur); + if ("error" in resolved) { + throw new Error(`Unresolved import: ${cur} (${resolved.error})`); + } + content = resolved.contents; + } + sources[cur] = { content }; + + const importRe = /^\s*import\s+(?:\{[^}]+\}\s+from\s+)?"([^"]+)";/gm; + let m: RegExpExecArray | null; + while ((m = importRe.exec(content)) !== null) { + const rawImport = m[1]; + let normalised: string; + if (rawImport.startsWith("@openzeppelin/")) { + normalised = rawImport; + } else if (rawImport.startsWith("./") || rawImport.startsWith("../")) { + const curDir = cur.includes("/") ? dirname(cur) : "."; + const joined = join(curDir, rawImport); + normalised = joined.startsWith(".") ?
joined.slice(2) : joined; + } else { + normalised = rawImport; + } + if (!seen.has(normalised)) stack.push(normalised); + } + } + + return sources; +} + +function compileNotaryRegistry(): CompiledArtifact { + const entry = "NotaryRegistry.sol"; + const sources = collectSources(entry); + const input: SolcInput = { + language: "Solidity", + sources, + settings: { + optimizer: { enabled: true, runs: 200 }, + outputSelection: { "*": { "*": ["abi", "evm.bytecode.object"] } }, + }, + }; + const output: SolcOutput = JSON.parse( + solc.compile(JSON.stringify(input), { import: findImports }), + ); + const fatal = (output.errors ?? []).filter((e) => e.severity === "error"); + if (fatal.length > 0) { + throw new Error( + `[deploy-notary-registry] solc compile failed:\n${fatal + .map((e) => e.formattedMessage) + .join("\n")}`, + ); + } + const artifact = output.contracts[entry]?.NotaryRegistry; + if (!artifact) { + throw new Error( + "[deploy-notary-registry] solc did not emit NotaryRegistry artifact", + ); + } + return { + abi: artifact.abi, + bytecode: "0x" + artifact.evm.bytecode.object, + }; +} + +function require1(name: string): string { + const v = process.env[name]; + if (!v) { + throw new Error(`[deploy-notary-registry] ${name} is required`); + } + return v; +} + +async function main(): Promise<void> { + const dryRun = process.env.NOTARY_DRY_RUN === "1"; + const artifact = compileNotaryRegistry(); + + if (dryRun) { + const initialOwner = + process.env.NOTARY_INITIAL_OWNER || + "0x0000000000000000000000000000000000000001"; + if (!isAddress(initialOwner)) { + throw new Error( + `[deploy-notary-registry] NOTARY_INITIAL_OWNER is not a valid address: ${initialOwner}`, + ); + } + const factory = new ContractFactory(artifact.abi, artifact.bytecode); + const deployTx = await factory.getDeployTransaction(initialOwner); + const envelope = { + contract: "NotaryRegistry", + dryRun: true, + initialOwner, + bytecodeLength: artifact.bytecode.length, + calldataLength: (deployTx.data as
string).length, + abiEntryCount: artifact.abi.length, + }; + console.log(JSON.stringify(envelope)); + return; + } + + const rpcUrl = require1("NOTARY_RPC_URL"); + const pk = require1("NOTARY_DEPLOYER_PRIVATE_KEY"); + const provider = new JsonRpcProvider(rpcUrl, undefined, { + staticNetwork: true, + cacheTimeout: -1, + }); + const wallet = new Wallet(pk, provider); + const deployerAddr = await wallet.getAddress(); + const initialOwner = process.env.NOTARY_INITIAL_OWNER || deployerAddr; + if (!isAddress(initialOwner)) { + throw new Error( + `[deploy-notary-registry] NOTARY_INITIAL_OWNER is not a valid address: ${initialOwner}`, + ); + } + const net = await provider.getNetwork(); + const bal = await provider.getBalance(deployerAddr); + console.error( + `[deploy-notary-registry] deployer=${deployerAddr} chainId=${net.chainId} balance=${bal} initialOwner=${initialOwner}`, + ); + if (bal === BigInt(0)) { + throw new Error( + `[deploy-notary-registry] deployer ${deployerAddr} has zero balance on chainId=${net.chainId}. Fund the account before deploying.`, + ); + } + + const factory = new ContractFactory(artifact.abi, artifact.bytecode, wallet); + const contract = await factory.deploy(initialOwner); + const receipt = await contract.deploymentTransaction()?.wait(); + const address = await contract.getAddress(); + const envelope = { + contract: "NotaryRegistry", + address, + txHash: receipt?.hash, + chainId: Number(net.chainId), + initialOwner, + }; + console.log(JSON.stringify(envelope)); +} + +main().catch((err) => { + console.error(err); + process.exit(1); +}); diff --git a/docker-compose.yml b/docker-compose.yml index a982677..efaeff7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,28 +1,44 @@ -version: '3.8' +# CurrenciCombo sandbox stack — orchestrator + portal + Postgres + Redis. 
+# +# Usage: +# +# cp .env.sandbox.example .env.sandbox +# # edit .env.sandbox as needed +# docker compose --env-file .env.sandbox up -d +# curl http://localhost:${ORCHESTRATOR_PORT:-8080}/health +# curl http://localhost:${ORCHESTRATOR_PORT:-8080}/ready +# open http://localhost:${PORTAL_PORT:-3000}/ +# +# External blockers from proxmox/scripts/verify/check-external-dependencies.sh +# surface in the orchestrator's boot-time log summary (see PR Y). Leaving +# DBIS_CORE_URL / FIN_SANDBOX_URL / CC_IDENTITY_URL unset is expected in +# the sandbox — the services fall back to deterministic mocks and tag +# the EXT-* blocker id in every log line. +# +# EXT-CHAIN138-CI-RPC is resolved out of the box: CHAIN_138_RPC_URL +# defaults to the public endpoint at https://rpc.public-0138.defi-oracle.io. services: - # PostgreSQL database postgres: image: postgres:15-alpine environment: - POSTGRES_DB: comboflow - POSTGRES_USER: comboflow - POSTGRES_PASSWORD: comboflow + POSTGRES_DB: ${POSTGRES_DB:-currencicombo} + POSTGRES_USER: ${POSTGRES_USER:-currencicombo} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-currencicombo} ports: - - "5432:5432" + - "${POSTGRES_PORT:-5432}:5432" volumes: - postgres_data:/var/lib/postgresql/data healthcheck: - test: ["CMD-SHELL", "pg_isready -U comboflow"] + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-currencicombo} -d ${POSTGRES_DB:-currencicombo}"] interval: 10s timeout: 5s retries: 5 - # Redis cache redis: image: redis:7-alpine ports: - - "6379:6379" + - "${REDIS_PORT:-6379}:6379" volumes: - redis_data:/data healthcheck: @@ -31,43 +47,56 @@ services: timeout: 3s retries: 5 - # Orchestrator service orchestrator: build: - context: . 
+ context: ./orchestrator dockerfile: Dockerfile + image: currencicombo/orchestrator:local ports: - - "8080:8080" + - "${ORCHESTRATOR_PORT:-8080}:8080" environment: NODE_ENV: production - PORT: 8080 - DATABASE_URL: postgresql://comboflow:comboflow@postgres:5432/comboflow + PORT: "8080" + DATABASE_URL: postgresql://${POSTGRES_USER:-currencicombo}:${POSTGRES_PASSWORD:-currencicombo}@postgres:5432/${POSTGRES_DB:-currencicombo} REDIS_URL: redis://redis:6379 + # --- required for signed events (PR O) --- + EVENT_SIGNING_SECRET: ${EVENT_SIGNING_SECRET} + # --- API keys (PR M) — comma-separated key:role pairs --- + ORCHESTRATOR_API_KEYS: ${ORCHESTRATOR_API_KEYS} + # --- Chain 138 (EXT-CHAIN138-CI-RPC — resolved) --- + CHAIN_138_RPC_URL: ${CHAIN_138_RPC_URL:-https://rpc.public-0138.defi-oracle.io} + NOTARY_REGISTRY_ADDRESS: ${NOTARY_REGISTRY_ADDRESS:-} + ORCHESTRATOR_PRIVATE_KEY: ${ORCHESTRATOR_PRIVATE_KEY:-} + # --- External blockers (intentionally unset in sandbox) --- + DBIS_CORE_URL: ${DBIS_CORE_URL:-} + FIN_SANDBOX_URL: ${FIN_SANDBOX_URL:-} + CC_IDENTITY_URL: ${CC_IDENTITY_URL:-} + CC_CONTROLS_MATRIX_URL: ${CC_CONTROLS_MATRIX_URL:-} depends_on: postgres: condition: service_healthy redis: condition: service_healthy healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"] + test: ["CMD", "node", "-e", "require('http').get('http://127.0.0.1:8080/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"] interval: 30s timeout: 10s retries: 3 + start_period: 15s - # Frontend - webapp: + portal: build: - context: ./webapp + context: . 
dockerfile: Dockerfile + args: + VITE_ORCHESTRATOR_URL: ${VITE_ORCHESTRATOR_URL:-http://localhost:8080} + image: currencicombo/portal:local ports: - - "3000:3000" - environment: - NODE_ENV: production - NEXT_PUBLIC_ORCH_URL: http://orchestrator:8080 + - "${PORTAL_PORT:-3000}:80" depends_on: - - orchestrator + orchestrator: + condition: service_healthy volumes: postgres_data: redis_data: - diff --git a/nginx.conf b/nginx.conf new file mode 100644 index 0000000..5033ef1 --- /dev/null +++ b/nginx.conf @@ -0,0 +1,28 @@ +server { + listen 80; + server_name _; + + # Static SPA — vite build output lives here. + root /usr/share/nginx/html; + index index.html; + + # Long-cache hashed assets produced by vite's rollup chunks. + location /assets/ { + access_log off; + expires 1y; + add_header Cache-Control "public, max-age=31536000, immutable"; + try_files $uri =404; + } + + # SPA fallback — every other path yields index.html so client-side + # react-router can take over (see src/App.tsx / ). + location / { + try_files $uri $uri/ /index.html; + } + + # Defensive: no sourcemap exposure in sandbox. + location ~ \.map$ { + deny all; + return 404; + } +} diff --git a/orchestrator/Dockerfile b/orchestrator/Dockerfile new file mode 100644 index 0000000..24548e7 --- /dev/null +++ b/orchestrator/Dockerfile @@ -0,0 +1,54 @@ +# Multi-stage build for the CurrenciCombo orchestrator. +# +# Context MUST be the orchestrator/ directory so the build does not +# need to traverse the whole repo. Build from repo root with: +# +# docker build -t currencicombo/orchestrator:local -f orchestrator/Dockerfile orchestrator/ +# +# or via docker-compose (see docker-compose.yml at repo root). + +# ------- deps stage ------- +FROM node:20-alpine AS deps +WORKDIR /app +COPY package.json package-lock.json ./ +# `fsevents` is a darwin-only optional dep pulled in transitively via +# ganache + jest; npm 10's `ci` still validates the darwin-pinned +# entries on linux builders and fails with EBADPLATFORM. 
Use +# `npm install --omit=optional` to sidestep the strict check; we do +# not need reproducible nested optional resolutions for a runtime-only +# image (the tsc build only touches first-party deps). +RUN npm install --omit=optional --no-audit --no-fund --ignore-scripts + +# ------- build stage ------- +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +COPY --from=deps /app/node_modules ./node_modules +COPY tsconfig.json ./ +COPY src ./src +RUN npm run build + +# ------- runtime stage ------- +FROM node:20-alpine AS runtime +WORKDIR /app +ENV NODE_ENV=production +ENV PORT=8080 + +RUN apk add --no-cache dumb-init \ + && addgroup -S orchestrator \ + && adduser -S -G orchestrator orchestrator + +COPY package.json package-lock.json ./ +RUN npm install --omit=dev --omit=optional --no-audit --no-fund --ignore-scripts \ + && npm cache clean --force + +COPY --from=build /app/dist ./dist + +USER orchestrator +EXPOSE 8080 + +HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ + CMD node -e "require('http').get('http://127.0.0.1:8080/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))" + +ENTRYPOINT ["/usr/bin/dumb-init", "--"] +CMD ["node", "dist/index.js"] diff --git a/orchestrator/src/config/env.ts b/orchestrator/src/config/env.ts index e09f2c4..2c5b80f 100644 --- a/orchestrator/src/config/env.ts +++ b/orchestrator/src/config/env.ts @@ -1,4 +1,18 @@ -import { z } from "zod"; +import { z, ZodTypeAny } from "zod"; + +/** + * Empty strings from `.env`-loaded variables (docker-compose with + * `NOTARY_REGISTRY_ADDRESS=` in .env.sandbox, Kubernetes `valueFrom` + * secrets that resolve to "", etc.) should validate identically to + * the variable being unset. Without this coercion, zod's + * `.regex(...).optional()` rejects `""` because the value IS provided. + */ +function emptyToUndefined<T extends ZodTypeAny>(schema: T) { + return z.preprocess( + (v) => (typeof v === "string" && v.length === 0 ?
undefined : v), + schema, + ); +} /** * Environment variable validation schema @@ -6,22 +20,26 @@ import { z } from "zod"; const envSchema = z.object({ NODE_ENV: z.enum(["development", "production", "test"]).default("development"), PORT: z.string().transform(Number).pipe(z.number().int().positive()), - DATABASE_URL: z.string().url().optional(), - API_KEYS: z.string().optional(), - REDIS_URL: z.string().url().optional(), + DATABASE_URL: emptyToUndefined(z.string().url().optional()), + API_KEYS: emptyToUndefined(z.string().optional()), + REDIS_URL: emptyToUndefined(z.string().url().optional()), LOG_LEVEL: z.enum(["error", "warn", "info", "debug"]).default("info"), - ALLOWED_IPS: z.string().optional(), + ALLOWED_IPS: emptyToUndefined(z.string().optional()), SESSION_SECRET: z.string().min(32), - JWT_SECRET: z.string().min(32).optional(), - AZURE_KEY_VAULT_URL: z.string().url().optional(), - AWS_SECRETS_MANAGER_REGION: z.string().optional(), - SENTRY_DSN: z.string().url().optional(), + JWT_SECRET: emptyToUndefined(z.string().min(32).optional()), + AZURE_KEY_VAULT_URL: emptyToUndefined(z.string().url().optional()), + AWS_SECRETS_MANAGER_REGION: emptyToUndefined(z.string().optional()), + SENTRY_DSN: emptyToUndefined(z.string().url().optional()), // Chain-138 + NotaryRegistry wiring (arch §4.5). All optional; when // absent the notary adapter falls back to its deterministic mock. 
- CHAIN_138_RPC_URL: z.string().url().optional(), - CHAIN_138_CHAIN_ID: z.string().regex(/^\d+$/).optional(), - NOTARY_REGISTRY_ADDRESS: z.string().regex(/^0x[0-9a-fA-F]{40}$/).optional(), - ORCHESTRATOR_PRIVATE_KEY: z.string().regex(/^0x[0-9a-fA-F]{64}$/).optional(), + CHAIN_138_RPC_URL: emptyToUndefined(z.string().url().optional()), + CHAIN_138_CHAIN_ID: emptyToUndefined(z.string().regex(/^\d+$/).optional()), + NOTARY_REGISTRY_ADDRESS: emptyToUndefined( + z.string().regex(/^0x[0-9a-fA-F]{40}$/).optional(), + ), + ORCHESTRATOR_PRIVATE_KEY: emptyToUndefined( + z.string().regex(/^0x[0-9a-fA-F]{64}$/).optional(), + ), }); /**