Initial Phoenix Sankofa Cloud setup

- Complete project structure with Next.js frontend
- GraphQL API backend with Apollo Server
- Portal application with NextAuth
- Crossplane Proxmox provider
- GitOps configurations
- CI/CD pipelines
- Testing infrastructure (Vitest, Jest, Go tests)
- Error handling and monitoring
- Security hardening
- UI component library
- Documentation
This commit is contained in:
defiQUG
2025-11-28 12:54:33 -08:00
commit 6f28146ac3
229 changed files with 43136 additions and 0 deletions

9
.eslintignore Normal file
View File

@@ -0,0 +1,9 @@
node_modules
.next
dist
build
coverage
*.config.js
*.config.ts
public

35
.eslintrc.json Normal file
View File

@@ -0,0 +1,35 @@
{
"extends": [
"next/core-web-vitals",
"plugin:@typescript-eslint/recommended",
"plugin:jsx-a11y/recommended"
],
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaVersion": 2020,
"sourceType": "module",
"ecmaFeatures": {
"jsx": true
}
},
"plugins": ["@typescript-eslint", "jsx-a11y", "import"],
"rules": {
"@typescript-eslint/no-unused-vars": ["warn", { "argsIgnorePattern": "^_" }],
"@typescript-eslint/no-explicit-any": "error",
"@typescript-eslint/explicit-function-return-type": "off",
"@typescript-eslint/explicit-module-boundary-types": "off",
"react-hooks/exhaustive-deps": "warn",
"react-hooks/rules-of-hooks": "error",
"import/order": [
"warn",
{
"groups": ["builtin", "external", "internal", "parent", "sibling", "index"],
"newlines-between": "always",
"alphabetize": { "order": "asc", "caseInsensitive": true }
}
],
"jsx-a11y/alt-text": "warn",
"jsx-a11y/anchor-is-valid": "warn"
}
}

31
.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,31 @@
# Dependabot: weekly dependency-update PRs for each package ecosystem
# in the monorepo (root app, portal, api, Go provider, and the
# workflows themselves).
version: 2
updates:
  # Root workspace (Next.js frontend)
  - package-ecosystem: "npm"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
  # Portal application
  - package-ecosystem: "npm"
    directory: "/portal"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
  # GraphQL API backend
  - package-ecosystem: "npm"
    directory: "/api"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
  # Crossplane provider (Go modules)
  - package-ecosystem: "gomod"
    directory: "/crossplane-provider-proxmox"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
  # Keep GitHub Actions pinned versions fresh (uses the default PR limit)
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"

114
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,114 @@
# Main CI pipeline for the root Next.js workspace: lint, type-check,
# formatting, tests with coverage upload, production build, and an
# accessibility placeholder. All jobs run independently on pushes and
# PRs targeting main/develop.
name: CI

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v2
        with:
          version: 8
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'pnpm'
      - run: pnpm install --frozen-lockfile
      - run: pnpm lint

  type-check:
    name: Type Check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v2
        with:
          version: 8
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'pnpm'
      - run: pnpm install --frozen-lockfile
      - run: pnpm type-check

  format-check:
    name: Format Check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v2
        with:
          version: 8
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'pnpm'
      - run: pnpm install --frozen-lockfile
      - run: pnpm format:check

  test:
    name: Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v2
        with:
          version: 8
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'pnpm'
      - run: pnpm install --frozen-lockfile
      - run: pnpm test --run
      # codecov-action v3 is deprecated; v4 requires an upload token
      # (set CODECOV_TOKEN in repository secrets).
      - uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage/coverage-final.json
          flags: unittests
          name: codecov-umbrella

  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v2
        with:
          version: 8
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'pnpm'
      - run: pnpm install --frozen-lockfile
      - run: pnpm build
      - name: Upload build artifacts
        # upload-artifact v3 was retired by GitHub; v4 is required.
        uses: actions/upload-artifact@v4
        with:
          name: build
          path: .next

  accessibility:
    name: Accessibility Check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v2
        with:
          version: 8
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'pnpm'
      - run: pnpm install --frozen-lockfile
      - name: Run accessibility tests
        run: |
          # Install pa11y or similar accessibility testing tool
          npm install -g @pa11y/pa11y-ci
          # Run accessibility checks (requires built app)
          echo "Accessibility checks would run here after build"

63
.github/workflows/crossplane-ci.yml vendored Normal file
View File

@@ -0,0 +1,63 @@
# CI for the Crossplane Proxmox provider (Go). Path-filtered so it only
# runs when the provider or this workflow changes.
name: Crossplane Provider CI

on:
  push:
    branches: [main, develop]
    paths:
      - 'crossplane-provider-proxmox/**'
      - '.github/workflows/crossplane-ci.yml'
  pull_request:
    branches: [main, develop]
    paths:
      - 'crossplane-provider-proxmox/**'

jobs:
  test:
    name: Go Test
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./crossplane-provider-proxmox
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version: '1.21'
      - run: go mod download
      # -race catches data races; coverage profile is uploaded below.
      - run: go test -v -race -coverprofile=coverage.out ./...
      # codecov-action v3 is deprecated; v4 requires an upload token.
      - uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./crossplane-provider-proxmox/coverage.out
          flags: crossplane
          name: codecov-crossplane

  lint:
    name: Go Lint
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./crossplane-provider-proxmox
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version: '1.21'
      - uses: golangci/golangci-lint-action@v3
        with:
          version: latest
          working-directory: ./crossplane-provider-proxmox

  build:
    name: Go Build
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./crossplane-provider-proxmox
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version: '1.21'
      - run: go mod download
      - run: make build

29
.github/workflows/deploy-dev.yml vendored Normal file
View File

@@ -0,0 +1,29 @@
name: Deploy to Dev
on:
push:
branches: [develop]
workflow_dispatch:
jobs:
deploy:
name: Deploy to Development
runs-on: ubuntu-latest
environment:
name: development
steps:
- uses: actions/checkout@v4
- uses: pnpm/action-setup@v2
with:
version: 8
- uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'pnpm'
- run: pnpm install --frozen-lockfile
- run: pnpm build
- name: Deploy to development environment
run: |
echo "Deploying to development..."
# Add your deployment commands here

32
.github/workflows/deploy-prod.yml vendored Normal file
View File

@@ -0,0 +1,32 @@
name: Deploy to Production
on:
workflow_dispatch:
inputs:
version:
description: 'Version to deploy'
required: true
type: string
jobs:
deploy:
name: Deploy to Production
runs-on: ubuntu-latest
environment:
name: production
steps:
- uses: actions/checkout@v4
- uses: pnpm/action-setup@v2
with:
version: 8
- uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'pnpm'
- run: pnpm install --frozen-lockfile
- run: pnpm build
- name: Deploy to production environment
run: |
echo "Deploying version ${{ github.event.inputs.version }} to production..."
# Add your deployment commands here

29
.github/workflows/deploy-staging.yml vendored Normal file
View File

@@ -0,0 +1,29 @@
name: Deploy to Staging
on:
push:
branches: [main]
workflow_dispatch:
jobs:
deploy:
name: Deploy to Staging
runs-on: ubuntu-latest
environment:
name: staging
steps:
- uses: actions/checkout@v4
- uses: pnpm/action-setup@v2
with:
version: 8
- uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'pnpm'
- run: pnpm install --frozen-lockfile
- run: pnpm build
- name: Deploy to staging environment
run: |
echo "Deploying to staging..."
# Add your deployment commands here

83
.github/workflows/portal-ci.yml vendored Normal file
View File

@@ -0,0 +1,83 @@
# CI for the portal app (npm-based, unlike the pnpm root workspace).
# Path-filtered to portal/ changes.
name: Portal CI

on:
  push:
    branches: [main, develop]
    paths:
      - 'portal/**'
      - '.github/workflows/portal-ci.yml'
  pull_request:
    branches: [main, develop]
    paths:
      - 'portal/**'

jobs:
  lint:
    name: Portal Lint
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./portal
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
          cache-dependency-path: portal/package-lock.json
      - run: npm ci
      - run: npm run lint

  type-check:
    name: Portal Type Check
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./portal
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
          cache-dependency-path: portal/package-lock.json
      - run: npm ci
      - run: npm run type-check

  test:
    name: Portal Test
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./portal
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
          cache-dependency-path: portal/package-lock.json
      - run: npm ci
      - run: npm test -- --coverage
      # codecov-action v3 is deprecated; v4 requires an upload token.
      - uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./portal/coverage/coverage-final.json
          flags: portal
          name: codecov-portal

  build:
    name: Portal Build
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./portal
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
          cache-dependency-path: portal/package-lock.json
      - run: npm ci
      - run: npm run build

68
.gitignore vendored Normal file
View File

@@ -0,0 +1,68 @@
# Dependencies
node_modules/
.pnp
.pnp.js
# Testing
coverage/
.nyc_output
# Next.js
.next/
out/
build/
dist/
# Production
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
# Local env files
.env*.local
.env
# Vercel
.vercel
# TypeScript
*.tsbuildinfo
next-env.d.ts
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db
# Temporary files
*.tmp
*.temp
.cache/
# Go
*.exe
*.exe~
*.dll
*.so
*.dylib
*.test
*.out
go.work
# Kubernetes
*.kubeconfig
# Secrets
secrets/
*.pem
*.key
*.crt

10
.lintstagedrc.json Normal file
View File

@@ -0,0 +1,10 @@
{
"*.{ts,tsx}": [
"eslint --fix",
"prettier --write"
],
"*.{json,md,mdx,css,html,yml,yaml,scss}": [
"prettier --write"
]
}

10
.prettierignore Normal file
View File

@@ -0,0 +1,10 @@
node_modules
.next
dist
build
coverage
*.lock
package-lock.json
pnpm-lock.yaml
yarn.lock

9
.prettierrc Normal file
View File

@@ -0,0 +1,9 @@
{
"semi": false,
"singleQuote": true,
"tabWidth": 2,
"trailingComma": "es5",
"printWidth": 100,
"plugins": ["prettier-plugin-tailwindcss"]
}

139
README.md Normal file
View File

@@ -0,0 +1,139 @@
# Phoenix Sankofa Cloud
**The sovereign cloud born of fire and ancestral wisdom.**
## Overview
Phoenix Sankofa Cloud is a next-generation, sovereign AI cloud infrastructure platform that combines:
- **Mythic Power**: Phoenix transformation and rebirth
- **Ancestral Wisdom**: Sankofa memory and return
- **Cultural Identity**: Akan heritage and sovereignty
- **Global Reach**: 325-region deployment
- **Technical Excellence**: World-class cloud infrastructure
## Tech Stack
### Frontend
- **Next.js 14+** (React + TypeScript)
- **TailwindCSS** + **shadcn/ui** for styling
- **Framer Motion** for animations
- **React Flow** for graph editing
- **react-three-fiber** + **drei** for 3D visualizations
- **ECharts** for dashboards
- **TanStack Query** for data fetching
### Backend
- **GraphQL API** — Apollo Server on Fastify (see `api/`)
- **PostgreSQL** for core data
- **WebSockets** for real-time updates (planned)
- **Neo4j** (optional, planned) for complex graph queries
## Getting Started
### Prerequisites
- Node.js 18+
- pnpm (recommended) or npm/yarn
### Installation
```bash
# Install dependencies
pnpm install
# Run development server
pnpm dev
# Build for production
pnpm build
# Start production server
pnpm start
```
### Development
```bash
# Type checking
pnpm type-check
# Linting
pnpm lint
# Formatting
pnpm format
# Testing
pnpm test
pnpm test:coverage
```
### Environment Variables
Create a `.env.local` file in the root directory with the following variables:
```env
# GraphQL API
NEXT_PUBLIC_GRAPHQL_ENDPOINT=/api/graphql
# Application
NEXT_PUBLIC_APP_URL=http://localhost:3000
NODE_ENV=development
# Monitoring (optional)
NEXT_PUBLIC_SENTRY_DSN=
SENTRY_AUTH_TOKEN=
# Analytics (optional)
NEXT_PUBLIC_ANALYTICS_ID=
```
See the portal README for portal-specific environment variables.
## Project Structure
```
Sankofa/
├── docs/ # Documentation
│ ├── brand/ # Brand documentation
│ └── architecture/ # Technical architecture
├── src/
│ ├── app/ # Next.js app router pages
│ ├── components/ # React components
│ │ ├── ui/ # UI components (shadcn/ui)
│ │ ├── 3d/ # 3D visualization components
│ │ ├── dashboards/ # Dashboard components
│ │ ├── editors/ # Graph/flow editors
│ │ └── well-architected/ # WAF components
│ ├── lib/ # Utilities and helpers
│ ├── styles/ # Global styles
│ └── content/ # Content files
├── public/ # Static assets
│ └── brand/ # Brand assets
└── package.json
```
## Brand Philosophy
Phoenix Sankofa Cloud is built on the principle of **Remember → Retrieve → Restore → Rise**:
- **Remember**: Where we came from
- **Retrieve**: What was essential
- **Restore**: Identity and sovereignty
- **Rise**: Forward with purpose
## Documentation
See the `/docs` directory for:
- Brand philosophy and positioning
- Product naming system
- Technical architecture
- Well-Architected Framework approach
## License
[To be determined]
---
**Phoenix Sankofa Cloud** — Remember. Retrieve. Restore. Rise.

3151
api/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

35
api/package.json Normal file
View File

@@ -0,0 +1,35 @@
{
  "name": "phoenix-sankofa-api",
  "version": "1.0.0",
  "private": true,
  "type": "module",
  "scripts": {
    "dev": "tsx watch src/server.ts",
    "build": "tsc",
    "start": "node dist/server.js",
    "type-check": "tsc --noEmit",
    "db:migrate": "node dist/db/migrate.js",
    "db:seed": "tsx src/db/seed.ts"
  },
  "dependencies": {
    "@apollo/server": "^4.9.5",
    "@as-integrations/fastify": "^1.1.0",
    "@graphql-tools/schema": "^10.0.0",
    "fastify": "^4.24.3",
    "pg": "^8.11.3",
    "graphql": "^16.8.1",
    "graphql-tag": "^2.12.6",
    "jsonwebtoken": "^9.0.2",
    "bcryptjs": "^2.4.3",
    "zod": "^3.22.4",
    "dotenv": "^16.3.1"
  },
  "devDependencies": {
    "@types/node": "^20.12.0",
    "@types/pg": "^8.10.9",
    "@types/jsonwebtoken": "^9.0.5",
    "@types/bcryptjs": "^2.4.6",
    "typescript": "^5.4.0",
    "tsx": "^4.7.0"
  }
}

12
api/src/context.ts Normal file
View File

@@ -0,0 +1,12 @@
import { FastifyRequest } from 'fastify'
import { Context } from './types/context'
import { getDb } from './db'

/**
 * Builds the per-request GraphQL context: the raw request, the user that
 * the auth middleware attached to it (if any), and the shared DB pool.
 */
export async function createContext(request: FastifyRequest): Promise<Context> {
  // The auth middleware stashes the decoded JWT user on the request object.
  const authenticatedUser = (request as any).user
  const context: Context = {
    request,
    user: authenticatedUser,
    db: getDb(),
  }
  return context
}

32
api/src/db/index.ts Normal file
View File

@@ -0,0 +1,32 @@
import { Pool } from 'pg'

// Lazily-created singleton connection pool shared by the whole process.
let pool: Pool | null = null

/**
 * Returns the shared PostgreSQL pool, creating it on first use from
 * DB_* environment variables (with local-dev defaults).
 *
 * NOTE(review): the 'postgres'/'postgres' credential fallbacks are
 * dev-only conveniences — confirm production always sets DB_USER /
 * DB_PASSWORD so these defaults are never used there.
 */
export function getDb(): Pool {
  if (!pool) {
    pool = new Pool({
      host: process.env.DB_HOST || 'localhost',
      port: parseInt(process.env.DB_PORT || '5432', 10),
      database: process.env.DB_NAME || 'sankofa',
      user: process.env.DB_USER || 'postgres',
      password: process.env.DB_PASSWORD || 'postgres',
      max: 20, // cap on concurrent connections
      idleTimeoutMillis: 30000,
      connectionTimeoutMillis: 2000,
    })
    // Errors on idle clients would otherwise crash the process; log instead.
    pool.on('error', (err) => {
      console.error('Unexpected error on idle client', err)
    })
  }
  return pool
}

/**
 * Drains and discards the pool (e.g. on shutdown). Safe to call when no
 * pool exists; a later getDb() will create a fresh one.
 */
export async function closeDb(): Promise<void> {
  if (pool) {
    await pool.end()
    pool = null
  }
}

64
api/src/db/schema.sql Normal file
View File

@@ -0,0 +1,64 @@
-- Phoenix Sankofa Cloud Database Schema
-- Three core tables (users, sites, resources) plus triggers that keep
-- updated_at current on every UPDATE. All statements are idempotent
-- (IF NOT EXISTS / OR REPLACE) except the CREATE TRIGGERs.

-- Enable UUID extension (provides uuid_generate_v4 for primary keys)
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- Users table: role is constrained to the GraphQL UserRole enum values.
CREATE TABLE IF NOT EXISTS users (
  id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
  email VARCHAR(255) UNIQUE NOT NULL,
  name VARCHAR(255) NOT NULL,
  password_hash VARCHAR(255) NOT NULL,
  role VARCHAR(50) NOT NULL DEFAULT 'USER' CHECK (role IN ('ADMIN', 'USER', 'VIEWER')),
  created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
  updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

-- Sites table: a deployment location; status mirrors the SiteStatus enum.
CREATE TABLE IF NOT EXISTS sites (
  id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
  name VARCHAR(255) NOT NULL,
  region VARCHAR(255) NOT NULL,
  status VARCHAR(50) NOT NULL DEFAULT 'ACTIVE' CHECK (status IN ('ACTIVE', 'INACTIVE', 'MAINTENANCE')),
  metadata JSONB,
  created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
  updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

-- Resources table: belongs to a site; deleting a site cascades to its
-- resources. type/status mirror the ResourceType/ResourceStatus enums.
CREATE TABLE IF NOT EXISTS resources (
  id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
  name VARCHAR(255) NOT NULL,
  type VARCHAR(50) NOT NULL CHECK (type IN ('VM', 'CONTAINER', 'STORAGE', 'NETWORK')),
  status VARCHAR(50) NOT NULL DEFAULT 'PENDING' CHECK (status IN ('PENDING', 'PROVISIONING', 'RUNNING', 'STOPPED', 'ERROR', 'DELETING')),
  site_id UUID NOT NULL REFERENCES sites(id) ON DELETE CASCADE,
  metadata JSONB,
  created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
  updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

-- Indexes for the filters used by the resource service (type, status,
-- site_id) and for login lookups by email.
CREATE INDEX IF NOT EXISTS idx_resources_site_id ON resources(site_id);
CREATE INDEX IF NOT EXISTS idx_resources_type ON resources(type);
CREATE INDEX IF NOT EXISTS idx_resources_status ON resources(status);
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);

-- Update timestamp trigger function: stamps updated_at on each UPDATE.
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
  NEW.updated_at = NOW();
  RETURN NEW;
END;
$$ language 'plpgsql';

-- Triggers (will error if re-run on an existing database; drop first or
-- guard with DO blocks when migrating)
CREATE TRIGGER update_users_updated_at BEFORE UPDATE ON users
  FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_sites_updated_at BEFORE UPDATE ON sites
  FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_resources_updated_at BEFORE UPDATE ON resources
  FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

View File

@@ -0,0 +1,39 @@
import { FastifyRequest, FastifyReply } from 'fastify'
import jwt from 'jsonwebtoken'
import { User } from '../types/context'

// SECURITY NOTE(review): the hard-coded fallback means tokens verify
// against a publicly-known secret if JWT_SECRET is unset — ensure the
// env var is required in production deployments.
const JWT_SECRET = process.env.JWT_SECRET || 'your-secret-key-change-in-production'

/**
 * Fastify onRequest hook: if a Bearer token is present and valid, attaches
 * the decoded user to the request for createContext to pick up. It never
 * rejects a request — missing/invalid tokens simply leave request.user
 * unset, and the GraphQL resolvers enforce authentication per field.
 */
export async function authMiddleware(
  request: FastifyRequest,
  _reply: FastifyReply
) {
  // Skip auth for health check and GraphQL introspection
  if (request.url === '/health' || request.method === 'OPTIONS') {
    return
  }
  // Get token from Authorization header
  const authHeader = request.headers.authorization
  if (!authHeader || !authHeader.startsWith('Bearer ')) {
    // Allow unauthenticated requests - GraphQL will handle auth per query/mutation
    return
  }
  // Strip the "Bearer " prefix (7 characters).
  const token = authHeader.substring(7)
  try {
    const decoded = jwt.verify(token, JWT_SECRET) as any
    // Attach user to request
    ;(request as any).user = {
      id: decoded.id,
      email: decoded.email,
      name: decoded.name,
      role: decoded.role,
    } as User
  } catch (error) {
    // Invalid token - let GraphQL resolvers handle it
    return
  }
}

9
api/src/schema/index.ts Normal file
View File

@@ -0,0 +1,9 @@
// Assembles the executable GraphQL schema from the SDL type definitions
// and the resolver map.
// NOTE(review): '@graphql-tools/schema' does not appear in
// api/package.json's dependencies — verify it is installed (directly or
// transitively) or the server will fail to start.
import { makeExecutableSchema } from '@graphql-tools/schema'
import { typeDefs } from './typeDefs'
import { resolvers } from './resolvers'

export const schema = makeExecutableSchema({
  typeDefs,
  resolvers,
})

181
api/src/schema/resolvers.ts Normal file
View File

@@ -0,0 +1,181 @@
import { GraphQLError } from 'graphql'
import { Context } from '../types/context'
import * as resourceService from '../services/resource'
import * as siteService from '../services/site'
import * as userService from '../services/user'
import * as authService from '../services/auth'

/** Throws an UNAUTHENTICATED GraphQL error unless a user is on the context. */
function requireUser(context: Context): void {
  if (!context.user) {
    throw new GraphQLError('Authentication required', {
      extensions: { code: 'UNAUTHENTICATED' },
    })
  }
}

/** Throws a FORBIDDEN GraphQL error unless the context user is an ADMIN. */
function requireAdmin(context: Context): void {
  if (!context.user || context.user.role !== 'ADMIN') {
    throw new GraphQLError('Admin access required', {
      extensions: { code: 'FORBIDDEN' },
    })
  }
}

/**
 * Resolver map for the schema in ./typeDefs. Queries/mutations require an
 * authenticated user; user management additionally requires ADMIN. login
 * and health are the only unauthenticated operations.
 */
export const resolvers = {
  Query: {
    // Unauthenticated liveness probe for the GraphQL layer.
    health: () => ({
      status: 'ok',
      timestamp: new Date(),
      version: process.env.npm_package_version || '1.0.0',
    }),
    resources: async (_: unknown, args: { filter?: any }, context: Context) => {
      requireUser(context)
      return resourceService.getResources(context, args.filter)
    },
    resource: async (_: unknown, args: { id: string }, context: Context) => {
      requireUser(context)
      return resourceService.getResource(context, args.id)
    },
    sites: async (_: unknown, __: unknown, context: Context) => {
      requireUser(context)
      return siteService.getSites(context)
    },
    site: async (_: unknown, args: { id: string }, context: Context) => {
      requireUser(context)
      return siteService.getSite(context, args.id)
    },
    me: async (_: unknown, __: unknown, context: Context) => {
      requireUser(context)
      return context.user
    },
    users: async (_: unknown, __: unknown, context: Context) => {
      requireAdmin(context)
      return userService.getUsers(context)
    },
    user: async (_: unknown, args: { id: string }, context: Context) => {
      requireAdmin(context)
      return userService.getUser(context, args.id)
    },
  },
  Mutation: {
    login: async (_: unknown, args: { email: string; password: string }) => {
      return authService.login(args.email, args.password)
    },
    logout: async (_: unknown, __: unknown, _context: Context) => {
      // In a real implementation, invalidate the token
      return true
    },
    createResource: async (_: unknown, args: { input: any }, context: Context) => {
      requireUser(context)
      return resourceService.createResource(context, args.input)
    },
    updateResource: async (
      _: unknown,
      args: { id: string; input: any },
      context: Context
    ) => {
      requireUser(context)
      return resourceService.updateResource(context, args.id, args.input)
    },
    deleteResource: async (_: unknown, args: { id: string }, context: Context) => {
      requireUser(context)
      return resourceService.deleteResource(context, args.id)
    },
    createUser: async (_: unknown, args: { input: any }, context: Context) => {
      requireAdmin(context)
      return userService.createUser(context, args.input)
    },
    updateUser: async (
      _: unknown,
      args: { id: string; input: any },
      context: Context
    ) => {
      requireAdmin(context)
      return userService.updateUser(context, args.id, args.input)
    },
    deleteUser: async (_: unknown, args: { id: string }, context: Context) => {
      requireAdmin(context)
      return userService.deleteUser(context, args.id)
    },
  },
  // Relationship field resolvers.
  Resource: {
    site: async (resource: any, __: unknown, context: Context) => {
      return siteService.getSite(context, resource.siteId)
    },
  },
  Site: {
    resources: async (site: any, __: unknown, context: Context) => {
      return resourceService.getResources(context, { siteId: site.id })
    },
  },
}

146
api/src/schema/typeDefs.ts Normal file
View File

@@ -0,0 +1,146 @@
import { gql } from 'graphql-tag'
export const typeDefs = gql`
scalar DateTime
scalar JSON
type Query {
# Health check
health: HealthStatus
# Resources
resources(filter: ResourceFilter): [Resource!]!
resource(id: ID!): Resource
# Sites
sites: [Site!]!
site(id: ID!): Site
# Users
me: User
users: [User!]!
user(id: ID!): User
}
type Mutation {
# Authentication
login(email: String!, password: String!): AuthPayload!
logout: Boolean!
# Resources
createResource(input: CreateResourceInput!): Resource!
updateResource(id: ID!, input: UpdateResourceInput!): Resource!
deleteResource(id: ID!): Boolean!
# Users
createUser(input: CreateUserInput!): User!
updateUser(id: ID!, input: UpdateUserInput!): User!
deleteUser(id: ID!): Boolean!
}
type Subscription {
resourceUpdated(id: ID!): Resource!
resourceCreated: Resource!
resourceDeleted(id: ID!): ID!
}
type HealthStatus {
status: String!
timestamp: DateTime!
version: String!
}
type Resource {
id: ID!
name: String!
type: ResourceType!
status: ResourceStatus!
site: Site!
metadata: JSON
createdAt: DateTime!
updatedAt: DateTime!
}
type Site {
id: ID!
name: String!
region: String!
status: SiteStatus!
resources: [Resource!]!
createdAt: DateTime!
updatedAt: DateTime!
}
type User {
id: ID!
email: String!
name: String!
role: UserRole!
createdAt: DateTime!
updatedAt: DateTime!
}
type AuthPayload {
token: String!
user: User!
}
enum ResourceType {
VM
CONTAINER
STORAGE
NETWORK
}
enum ResourceStatus {
PENDING
PROVISIONING
RUNNING
STOPPED
ERROR
DELETING
}
enum SiteStatus {
ACTIVE
INACTIVE
MAINTENANCE
}
enum UserRole {
ADMIN
USER
VIEWER
}
input ResourceFilter {
type: ResourceType
status: ResourceStatus
siteId: ID
}
input CreateResourceInput {
name: String!
type: ResourceType!
siteId: ID!
metadata: JSON
}
input UpdateResourceInput {
name: String
metadata: JSON
}
input CreateUserInput {
email: String!
name: String!
password: String!
role: UserRole
}
input UpdateUserInput {
name: String
role: UserRole
}
`

52
api/src/server.ts Normal file
View File

@@ -0,0 +1,52 @@
import 'dotenv/config'
import Fastify from 'fastify'
import { ApolloServer } from '@apollo/server'
import { fastifyApolloDrainPlugin, fastifyApolloHandler } from '@as-integrations/fastify'
import { schema } from './schema'
import { createContext } from './context'
import { authMiddleware } from './middleware/auth'
const fastify = Fastify({
logger: true,
})
// Register authentication middleware
fastify.addHook('onRequest', authMiddleware)
// Create Apollo Server
const apolloServer = new ApolloServer({
schema,
plugins: [fastifyApolloDrainPlugin(fastify)],
})
async function startServer() {
try {
// Start Apollo Server
await apolloServer.start()
// Register GraphQL route
fastify.post('/graphql', async (request, reply) => {
return fastifyApolloHandler(apolloServer, {
context: async () => createContext(request),
})(request, reply)
})
// Health check endpoint
fastify.get('/health', async () => {
return { status: 'ok', timestamp: new Date().toISOString() }
})
// Start Fastify server
const port = parseInt(process.env.PORT || '4000', 10)
const host = process.env.HOST || '0.0.0.0'
await fastify.listen({ port, host })
console.log(`🚀 Server ready at http://${host}:${port}/graphql`)
} catch (err) {
fastify.log.error(err)
process.exit(1)
}
}
startServer()

55
api/src/services/auth.ts Normal file
View File

@@ -0,0 +1,55 @@
import jwt from 'jsonwebtoken'
import bcrypt from 'bcryptjs'
import { getDb } from '../db'
import { User } from '../types/context'

// SECURITY NOTE(review): hard-coded fallback secret — ensure JWT_SECRET
// is required in production (same fallback exists in middleware/auth).
const JWT_SECRET = process.env.JWT_SECRET || 'your-secret-key-change-in-production'
const JWT_EXPIRES_IN = process.env.JWT_EXPIRES_IN || '7d'

// Hash of a throwaway value; compared against when the email is unknown
// so that the "no such user" path takes roughly as long as the
// "wrong password" path (mitigates user enumeration via response timing).
const DUMMY_HASH = bcrypt.hashSync('not-a-real-password', 10)

export interface AuthPayload {
  token: string
  user: User
}

/**
 * Verifies email/password against the users table and returns a signed
 * JWT plus the public user record.
 *
 * @throws Error('Invalid email or password') on unknown email or wrong
 *   password — deliberately the same message for both cases.
 */
export async function login(email: string, password: string): Promise<AuthPayload> {
  const db = getDb()
  const result = await db.query(
    'SELECT id, email, name, password_hash, role, created_at, updated_at FROM users WHERE email = $1',
    [email]
  )
  if (result.rows.length === 0) {
    // Burn a bcrypt comparison anyway to equalize timing with the
    // wrong-password path before rejecting.
    await bcrypt.compare(password, DUMMY_HASH)
    throw new Error('Invalid email or password')
  }
  const user = result.rows[0]
  const isValid = await bcrypt.compare(password, user.password_hash)
  if (!isValid) {
    throw new Error('Invalid email or password')
  }
  // Token payload mirrors what middleware/auth expects to decode.
  const token = jwt.sign(
    {
      id: user.id,
      email: user.email,
      name: user.name,
      role: user.role,
    },
    JWT_SECRET,
    { expiresIn: JWT_EXPIRES_IN }
  )
  return {
    token,
    user: {
      id: user.id,
      email: user.email,
      name: user.name,
      role: user.role,
      createdAt: user.created_at,
      updatedAt: user.updated_at,
    },
  }
}

View File

@@ -0,0 +1,105 @@
import { Context } from '../types/context'

/**
 * Lists resources, newest first, optionally filtered by type, status,
 * and/or siteId. Filter values are passed as parameterized query
 * arguments (no SQL injection via filter values); the clause text itself
 * is built from fixed strings only.
 */
export async function getResources(context: Context, filter?: any) {
  const db = context.db
  let query = 'SELECT * FROM resources WHERE 1=1'
  const params: any[] = []
  // paramCount tracks the next $n placeholder; it must stay in lockstep
  // with params.push order below.
  let paramCount = 1
  if (filter?.type) {
    query += ` AND type = $${paramCount}`
    params.push(filter.type)
    paramCount++
  }
  if (filter?.status) {
    query += ` AND status = $${paramCount}`
    params.push(filter.status)
    paramCount++
  }
  if (filter?.siteId) {
    query += ` AND site_id = $${paramCount}`
    params.push(filter.siteId)
    paramCount++
  }
  query += ' ORDER BY created_at DESC'
  const result = await db.query(query, params)
  return result.rows.map(mapResource)
}

/** Fetches one resource by id; throws if it does not exist. */
export async function getResource(context: Context, id: string) {
  const db = context.db
  const result = await db.query('SELECT * FROM resources WHERE id = $1', [id])
  if (result.rows.length === 0) {
    throw new Error('Resource not found')
  }
  return mapResource(result.rows[0])
}

/**
 * Inserts a resource in PENDING status. input.metadata defaults to {}.
 * A nonexistent siteId will surface as a FK violation from Postgres.
 */
export async function createResource(context: Context, input: any) {
  const db = context.db
  const result = await db.query(
    `INSERT INTO resources (name, type, status, site_id, metadata)
     VALUES ($1, $2, $3, $4, $5)
     RETURNING *`,
    [input.name, input.type, 'PENDING', input.siteId, JSON.stringify(input.metadata || {})]
  )
  return mapResource(result.rows[0])
}

/**
 * Partially updates name and/or metadata. With an empty input it is a
 * no-op read. The SET-clause and placeholder numbering are built in
 * lockstep, with the id bound as the final parameter.
 */
export async function updateResource(context: Context, id: string, input: any) {
  const db = context.db
  const updates: string[] = []
  const params: any[] = []
  let paramCount = 1
  if (input.name !== undefined) {
    updates.push(`name = $${paramCount}`)
    params.push(input.name)
    paramCount++
  }
  if (input.metadata !== undefined) {
    updates.push(`metadata = $${paramCount}`)
    params.push(JSON.stringify(input.metadata))
    paramCount++
  }
  if (updates.length === 0) {
    return getResource(context, id)
  }
  // id takes the next placeholder after all SET-clause params.
  params.push(id)
  const result = await db.query(
    `UPDATE resources SET ${updates.join(', ')} WHERE id = $${paramCount} RETURNING *`,
    params
  )
  return mapResource(result.rows[0])
}

/**
 * Deletes a resource by id. Returns true unconditionally — even when no
 * row matched (the rowCount is not inspected).
 */
export async function deleteResource(context: Context, id: string) {
  const db = context.db
  await db.query('DELETE FROM resources WHERE id = $1', [id])
  return true
}

/** Maps a snake_case DB row to the camelCase GraphQL Resource shape. */
function mapResource(row: any) {
  return {
    id: row.id,
    name: row.name,
    type: row.type,
    status: row.status,
    siteId: row.site_id,
    metadata: row.metadata || {},
    createdAt: row.created_at,
    updatedAt: row.updated_at,
  }
}

31
api/src/services/site.ts Normal file
View File

@@ -0,0 +1,31 @@
import { Context } from '../types/context'

/** Lists all sites, newest first. */
export async function getSites(context: Context) {
  const { rows } = await context.db.query('SELECT * FROM sites ORDER BY created_at DESC')
  return rows.map(mapSite)
}

/** Fetches a single site by id; throws if it does not exist. */
export async function getSite(context: Context, id: string) {
  const { rows } = await context.db.query('SELECT * FROM sites WHERE id = $1', [id])
  const row = rows[0]
  if (!row) {
    throw new Error('Site not found')
  }
  return mapSite(row)
}

/** Maps a snake_case DB row to the camelCase GraphQL Site shape. */
function mapSite(row: any) {
  const { id, name, region, status } = row
  return {
    id,
    name,
    region,
    status,
    metadata: row.metadata || {},
    createdAt: row.created_at,
    updatedAt: row.updated_at,
  }
}

88
api/src/services/user.ts Normal file
View File

@@ -0,0 +1,88 @@
import bcrypt from 'bcryptjs'
import { Context } from '../types/context'
export async function getUsers(context: Context) {
const db = context.db
const result = await db.query(
'SELECT id, email, name, role, created_at, updated_at FROM users ORDER BY created_at DESC'
)
return result.rows.map(mapUser)
}
export async function getUser(context: Context, id: string) {
const db = context.db
const result = await db.query(
'SELECT id, email, name, role, created_at, updated_at FROM users WHERE id = $1',
[id]
)
if (result.rows.length === 0) {
throw new Error('User not found')
}
return mapUser(result.rows[0])
}
export async function createUser(context: Context, input: any) {
const db = context.db
const passwordHash = await bcrypt.hash(input.password, 10)
const result = await db.query(
`INSERT INTO users (email, name, password_hash, role)
VALUES ($1, $2, $3, $4)
RETURNING id, email, name, role, created_at, updated_at`,
[input.email, input.name, passwordHash, input.role || 'USER']
)
return mapUser(result.rows[0])
}
// Partially update a user's name and/or role.
// Only the fields present on `input` are written; with no updatable fields
// the current row is returned unchanged (which also validates the id).
// Throws Error('User not found') when the id does not exist.
export async function updateUser(context: Context, id: string, input: any) {
  const db = context.db
  // Build the SET clause dynamically so only supplied fields are written.
  const updates: string[] = []
  const params: any[] = []
  let paramCount = 1
  if (input.name !== undefined) {
    updates.push(`name = $${paramCount}`)
    params.push(input.name)
    paramCount++
  }
  if (input.role !== undefined) {
    updates.push(`role = $${paramCount}`)
    params.push(input.role)
    paramCount++
  }
  if (updates.length === 0) {
    return getUser(context, id)
  }
  params.push(id)
  const result = await db.query(
    `UPDATE users SET ${updates.join(', ')} WHERE id = $${paramCount}
     RETURNING id, email, name, role, created_at, updated_at`,
    params
  )
  // Bug fix: previously an unknown id reached mapUser(undefined) and threw a
  // TypeError; fail the same way getUser does instead.
  if (result.rows.length === 0) {
    throw new Error('User not found')
  }
  return mapUser(result.rows[0])
}
// Delete a user by id.
// NOTE(review): this always returns true, even when no row matched the id,
// so callers cannot distinguish "deleted" from "did not exist". Confirm this
// is the intended contract; otherwise inspect the query's rowCount.
export async function deleteUser(context: Context, id: string) {
  const db = context.db
  await db.query('DELETE FROM users WHERE id = $1', [id])
  return true
}
// Convert a snake_case users row into the camelCase API shape.
function mapUser(row: any) {
  const { id, email, name, role } = row
  return {
    id,
    email,
    name,
    role,
    createdAt: row.created_at,
    updatedAt: row.updated_at,
  }
}

17
api/src/types/context.ts Normal file
View File

@@ -0,0 +1,17 @@
import { FastifyRequest } from 'fastify'
// Authenticated principal attached to a request after credential verification.
export interface User {
  id: string
  email: string
  name: string
  // Authorization tier used by resolvers (presumably ADMIN > USER > VIEWER —
  // confirm against the permission checks that consume it).
  role: 'ADMIN' | 'USER' | 'VIEWER'
  createdAt: Date
  updatedAt: Date
}
// Per-request context handed to every resolver/service function.
export interface Context {
  // The raw inbound Fastify request (headers, ip, etc.).
  request: FastifyRequest
  // Present only when the request carried valid credentials.
  user?: User
  db: any // Database connection - will be typed properly later
}

22
api/tsconfig.json Normal file
View File

@@ -0,0 +1,22 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"lib": ["ES2022"],
"moduleResolution": "node",
"rootDir": "./src",
"outDir": "./dist",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"types": ["node"]
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

84
cloudflare/README.md Normal file
View File

@@ -0,0 +1,84 @@
# Cloudflare Zero Trust Configuration
This directory contains all Cloudflare Zero Trust configurations for secure global access to the hybrid cloud control plane.
## Structure
```
cloudflare/
├── access-policies.yaml # Access policies for applications
├── tunnel-configs/ # Per-site tunnel configurations
├── gateway-policies.yaml # DNS and filtering policies
├── warp-config.json # WARP device enrollment
└── terraform/ # Terraform modules for Cloudflare
```
## Components
### Access Policies
Define who can access which applications based on:
- User identity (from Keycloak)
- Device posture
- IP address
- MFA requirements
- Time-based rules
### Tunnels
Outbound-only connections from Proxmox sites and control plane to Cloudflare:
- No public IPs required
- Automatic reconnection
- Load balancing across multiple tunnels
- Health checks
### Gateway Policies
DNS filtering and network security:
- Block malicious domains
- Log DNS queries
- Apply policies based on user/device
- Split DNS for internal services
### WARP
Device-level VPN for employees:
- Zero Trust network access
- Device posture checks
- Automatic enrollment
## Usage
### Apply Access Policies
```bash
# Note: Access policies are not managed by the cloudflared CLI; create them
# via the Cloudflare API or the Terraform module below.
# Or via Terraform
cd terraform
terraform apply
```
### Deploy Tunnels
1. Create tunnel in Cloudflare dashboard
2. Copy tunnel token
3. Update tunnel config with token
4. Deploy cloudflared agent with config
```bash
cloudflared tunnel run --config tunnel-configs/site-1.yaml
```
### Configure WARP
1. Create WARP enrollment in Cloudflare dashboard
2. Update warp-config.json with enrollment details
3. Distribute config to devices
## Security Best Practices
- Use service tokens for API access
- Rotate tunnel tokens regularly
- Enable MFA for all access policies
- Use device posture checks
- Log all access attempts
- Review policies quarterly

View File

@@ -0,0 +1,263 @@
# Cloudflare Zero Trust Access Policies
# These policies control who can access which applications
apiVersion: v1
kind: ConfigMap
metadata:
name: cloudflare-access-policies
namespace: default
data:
# Portal Access Policy
portal-policy: |
{
"name": "Portal Access",
"application": {
"domain": "portal.yourdomain.com",
"name": "Hybrid Cloud Portal"
},
"policies": [
{
"name": "Allow Authenticated Users",
"decision": "allow",
"include": [
{
"email": {
"domain": "yourdomain.com"
}
}
],
"require": [
{
"email": {
"domain": "yourdomain.com"
}
}
],
"session_duration": "24h"
},
{
"name": "Require MFA for Admins",
"decision": "allow",
"include": [
{
"group": {
"name": "admins"
}
}
],
"require": [
{
"mfa": {}
}
],
"session_duration": "8h"
}
]
}
# Rancher Access Policy
rancher-policy: |
{
"name": "Rancher Access",
"application": {
"domain": "rancher.yourdomain.com",
"name": "Rancher UI"
},
"policies": [
{
"name": "Allow Admin Group",
"decision": "allow",
"include": [
{
"group": {
"name": "admins"
}
},
{
"group": {
"name": "platform-engineers"
}
}
],
"require": [
{
"mfa": {}
},
{
"device_posture": {
"check": "managed_device"
}
}
],
"session_duration": "4h"
}
]
}
# ArgoCD Access Policy
argocd-policy: |
{
"name": "ArgoCD Access",
"application": {
"domain": "argocd.yourdomain.com",
"name": "ArgoCD GitOps"
},
"policies": [
{
"name": "Allow Platform Engineers",
"decision": "allow",
"include": [
{
"group": {
"name": "platform-engineers"
}
},
{
"group": {
"name": "admins"
}
}
],
"require": [
{
"mfa": {}
}
],
"session_duration": "8h"
}
]
}
# Grafana Access Policy
grafana-policy: |
{
"name": "Grafana Access",
"application": {
"domain": "grafana.yourdomain.com",
"name": "Grafana Dashboards"
},
"policies": [
{
"name": "Allow All Authenticated",
"decision": "allow",
"include": [
{
"email": {
"domain": "yourdomain.com"
}
}
],
"session_duration": "24h"
}
]
}
# Vault Access Policy
vault-policy: |
{
"name": "Vault Access",
"application": {
"domain": "vault.yourdomain.com",
"name": "HashiCorp Vault"
},
"policies": [
{
"name": "Allow Admin Group Only",
"decision": "allow",
"include": [
{
"group": {
"name": "admins"
}
}
],
"require": [
{
"mfa": {}
},
{
"device_posture": {
"check": "managed_device"
}
}
],
"session_duration": "2h"
}
]
}
# Proxmox API Access Policy
proxmox-api-policy: |
{
"name": "Proxmox API Access",
"application": {
"domain": "proxmox-api.yourdomain.com",
"name": "Proxmox API"
},
"policies": [
{
"name": "Allow Service Accounts",
"decision": "allow",
"include": [
{
"service_token": {
"name": "crossplane-proxmox-token"
}
}
],
"session_duration": "1h"
},
{
"name": "Allow Platform Engineers",
"decision": "allow",
"include": [
{
"group": {
"name": "platform-engineers"
}
}
],
"require": [
{
"mfa": {}
}
],
"session_duration": "4h"
}
]
}
# Keycloak Access Policy
keycloak-policy: |
{
"name": "Keycloak Access",
"application": {
"domain": "keycloak.yourdomain.com",
"name": "Keycloak Admin"
},
"policies": [
{
"name": "Allow Admin Group Only",
"decision": "allow",
"include": [
{
"group": {
"name": "admins"
}
}
],
"require": [
{
"mfa": {}
},
{
"device_posture": {
"check": "managed_device"
}
}
],
"session_duration": "2h"
}
]
}

View File

@@ -0,0 +1,149 @@
# Cloudflare Gateway Policies
# DNS filtering and network security policies
apiVersion: v1
kind: ConfigMap
metadata:
name: cloudflare-gateway-policies
namespace: default
data:
# DNS Policies
dns-policies: |
{
"policies": [
{
"name": "Block Malicious Domains",
"action": "block",
"precedence": 1,
"filters": [
{
"type": "dns",
"categories": [
"malware",
"phishing",
"command-and-control",
"ransomware",
"spyware"
]
}
]
},
{
"name": "Block Adult Content",
"action": "block",
"precedence": 2,
"filters": [
{
"type": "dns",
"categories": [
"adult"
]
}
],
"identity": {
"groups": [
{
"name": "employees"
}
]
}
},
{
"name": "Allow All for Admins",
"action": "allow",
"precedence": 100,
"identity": {
"groups": [
{
"name": "admins"
}
]
}
}
]
}
# Network Policies
# NOTE(review): fixed two JSON syntax errors below — both "groups" arrays were
# closed with "}" instead of "]", which made the embedded JSON unparseable.
network-policies: |
  {
    "policies": [
      {
        "name": "Block High Risk Ports",
        "action": "block",
        "precedence": 1,
        "rules": [
          {
            "protocol": "tcp",
            "ports": [
              "22",
              "23",
              "135",
              "139",
              "445",
              "1433",
              "3306",
              "3389",
              "5432"
            ]
          }
        ],
        "identity": {
          "groups": [
            {
              "name": "employees"
            }
          ]
        }
      },
      {
        "name": "Allow Admin Access",
        "action": "allow",
        "precedence": 100,
        "identity": {
          "groups": [
            {
              "name": "admins"
            },
            {
              "name": "platform-engineers"
            }
          ]
        }
      }
    ]
  }
# Logging Configuration
logging-config: |
{
"dns": {
"enabled": true,
"log_all": true,
"log_blocks": true
},
"network": {
"enabled": true,
"log_all": true,
"log_blocks": true
},
"retention": {
"days": 30
}
}
# Split DNS Configuration
split-dns: |
{
"domains": [
"yourdomain.com",
"*.yourdomain.com",
"*.svc.cluster.local",
"*.local"
],
"dns_servers": [
"10.0.0.53",
"10.1.0.53",
"10.2.0.53"
]
}

13
cloudflare/terraform/.gitignore vendored Normal file
View File

@@ -0,0 +1,13 @@
# Terraform files
*.tfstate
*.tfstate.*
.terraform/
# .terraform.lock.hcl is intentionally NOT ignored: Terraform recommends
# committing the dependency lock file so provider versions are pinned.
terraform.tfvars
*.tfvars
crash.log
override.tf
override.tf.json
*_override.tf
*_override.tf.json

View File

@@ -0,0 +1,250 @@
terraform {
required_version = ">= 1.0"
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "~> 4.0"
}
}
backend "s3" {
# Configure your backend here
# bucket = "your-terraform-state"
# key = "cloudflare/terraform.tfstate"
# region = "us-east-1"
}
}
provider "cloudflare" {
api_token = var.cloudflare_api_token
}
# Variables
# NOTE(review): the variable declarations that previously lived here
# duplicated the ones in variables.tf. Terraform loads every *.tf file in a
# directory as a single module, and duplicate `variable` blocks are a hard
# error ("Duplicate variable declaration"). variables.tf is the single source
# of truth — it also declares the tunnel_secret_* variables referenced below.
# Access Applications
resource "cloudflare_access_application" "portal" {
zone_id = var.zone_id
name = "Hybrid Cloud Portal"
domain = "portal.yourdomain.com"
session_duration = "24h"
cors_headers {
allowed_methods = ["GET", "POST", "PUT", "DELETE"]
allowed_origins = ["https://portal.yourdomain.com"]
allow_credentials = true
}
}
resource "cloudflare_access_application" "rancher" {
zone_id = var.zone_id
name = "Rancher UI"
domain = "rancher.yourdomain.com"
session_duration = "4h"
}
resource "cloudflare_access_application" "argocd" {
zone_id = var.zone_id
name = "ArgoCD GitOps"
domain = "argocd.yourdomain.com"
session_duration = "8h"
}
resource "cloudflare_access_application" "grafana" {
zone_id = var.zone_id
name = "Grafana Dashboards"
domain = "grafana.yourdomain.com"
session_duration = "24h"
}
resource "cloudflare_access_application" "vault" {
zone_id = var.zone_id
name = "HashiCorp Vault"
domain = "vault.yourdomain.com"
session_duration = "2h"
}
resource "cloudflare_access_application" "keycloak" {
zone_id = var.zone_id
name = "Keycloak Admin"
domain = "keycloak.yourdomain.com"
session_duration = "2h"
}
# Access Policies
resource "cloudflare_access_policy" "portal_authenticated" {
  application_id = cloudflare_access_application.portal.id
  zone_id        = var.zone_id
  name           = "Allow Authenticated Users"
  decision       = "allow"
  precedence     = 1

  include {
    # Provider v4 expects a list of domains, not a bare string.
    email_domain = ["yourdomain.com"]
  }
}

resource "cloudflare_access_policy" "portal_admin_mfa" {
  application_id = cloudflare_access_application.portal.id
  zone_id        = var.zone_id
  name           = "Require MFA for Admins"
  decision       = "allow"
  precedence     = 2

  include {
    # `group` takes a list of Access group IDs.
    group = [cloudflare_access_group.admins.id]
  }

  require {
    # Provider v4 has no boolean `mfa` argument; requiring the "mfa"
    # auth_method (AMR value) is the supported way to enforce MFA.
    auth_method = "mfa"
  }
}
# Access Groups
resource "cloudflare_access_group" "admins" {
  account_id = var.account_id
  name       = "admins"

  include {
    # Provider v4 expects a list of domains, not a bare string.
    email_domain = ["yourdomain.com"]
  }

  require {
    email = ["admin@yourdomain.com"]
  }
}

resource "cloudflare_access_group" "platform_engineers" {
  account_id = var.account_id
  name       = "platform-engineers"

  include {
    email_domain = ["yourdomain.com"]
  }
}

resource "cloudflare_access_group" "employees" {
  account_id = var.account_id
  name       = "employees"

  include {
    email_domain = ["yourdomain.com"]
  }
}
# Tunnels
resource "cloudflare_tunnel" "control_plane" {
account_id = var.account_id
name = "control-plane-tunnel"
secret = var.tunnel_secret_control_plane
}
resource "cloudflare_tunnel" "proxmox_site_1" {
account_id = var.account_id
name = "proxmox-site-1-tunnel"
secret = var.tunnel_secret_site_1
}
resource "cloudflare_tunnel" "proxmox_site_2" {
account_id = var.account_id
name = "proxmox-site-2-tunnel"
secret = var.tunnel_secret_site_2
}
resource "cloudflare_tunnel" "proxmox_site_3" {
account_id = var.account_id
name = "proxmox-site-3-tunnel"
secret = var.tunnel_secret_site_3
}
# Tunnel Routes
resource "cloudflare_tunnel_route" "control_plane" {
account_id = var.account_id
tunnel_id = cloudflare_tunnel.control_plane.id
network = "10.0.0.0/16"
comment = "Control plane network"
}
resource "cloudflare_tunnel_route" "site_1" {
account_id = var.account_id
tunnel_id = cloudflare_tunnel.proxmox_site_1.id
network = "10.1.0.0/16"
comment = "Proxmox site 1 network"
}
resource "cloudflare_tunnel_route" "site_2" {
account_id = var.account_id
tunnel_id = cloudflare_tunnel.proxmox_site_2.id
network = "10.2.0.0/16"
comment = "Proxmox site 2 network"
}
resource "cloudflare_tunnel_route" "site_3" {
account_id = var.account_id
tunnel_id = cloudflare_tunnel.proxmox_site_3.id
network = "10.3.0.0/16"
comment = "Proxmox site 3 network"
}
# Gateway Policies
resource "cloudflare_teams_list" "blocked_domains" {
  account_id = var.account_id
  name       = "Blocked Domains"
  type       = "DOMAIN"
  items = [
    "malware.example.com",
    "phishing.example.com"
  ]
}

resource "cloudflare_teams_rule" "block_malicious" {
  account_id  = var.account_id
  name        = "Block Malicious Domains"
  description = "Block known malicious domains"
  precedence  = 1
  action      = "block"
  filters     = ["dns"]

  # A DNS rule needs a traffic expression saying what it matches. Reference
  # the list above by id using Gateway's "$<list-id>" syntax; format() avoids
  # HCL interpolation-escaping gymnastics around the literal "$".
  traffic = format("any(dns.domains[*] in $%s)", cloudflare_teams_list.blocked_domains.id)

  rule_settings {
    block_page_enabled = true
    block_reason       = "This domain is blocked by security policy"
  }
}
# Outputs
output "tunnel_ids" {
value = {
control_plane = cloudflare_tunnel.control_plane.id
site_1 = cloudflare_tunnel.proxmox_site_1.id
site_2 = cloudflare_tunnel.proxmox_site_2.id
site_3 = cloudflare_tunnel.proxmox_site_3.id
}
}
output "application_ids" {
value = {
portal = cloudflare_access_application.portal.id
rancher = cloudflare_access_application.rancher.id
argocd = cloudflare_access_application.argocd.id
grafana = cloudflare_access_application.grafana.id
vault = cloudflare_access_application.vault.id
keycloak = cloudflare_access_application.keycloak.id
}
}

View File

@@ -0,0 +1,13 @@
# Copy this file to terraform.tfvars and fill in your values
# terraform.tfvars should be in .gitignore
cloudflare_api_token = "your-cloudflare-api-token"
zone_id = "your-zone-id"
account_id = "your-account-id"
# Generate secrets with: openssl rand -base64 32
tunnel_secret_control_plane = "your-control-plane-tunnel-secret"
tunnel_secret_site_1 = "your-site-1-tunnel-secret"
tunnel_secret_site_2 = "your-site-2-tunnel-secret"
tunnel_secret_site_3 = "your-site-3-tunnel-secret"

View File

@@ -0,0 +1,40 @@
variable "cloudflare_api_token" {
description = "Cloudflare API token with appropriate permissions"
type = string
sensitive = true
}
variable "zone_id" {
description = "Cloudflare Zone ID for yourdomain.com"
type = string
}
variable "account_id" {
description = "Cloudflare Account ID"
type = string
}
variable "tunnel_secret_control_plane" {
description = "Secret for control plane tunnel (generate with: openssl rand -base64 32)"
type = string
sensitive = true
}
variable "tunnel_secret_site_1" {
description = "Secret for Proxmox site 1 tunnel"
type = string
sensitive = true
}
variable "tunnel_secret_site_2" {
description = "Secret for Proxmox site 2 tunnel"
type = string
sensitive = true
}
variable "tunnel_secret_site_3" {
description = "Secret for Proxmox site 3 tunnel"
type = string
sensitive = true
}

View File

@@ -0,0 +1,77 @@
# Cloudflare Tunnel Configuration for Control Plane
# This tunnel connects the Kubernetes control plane to Cloudflare
tunnel: control-plane-tunnel
credentials-file: /etc/cloudflared/control-plane-tunnel.json
ingress:
# Portal
- hostname: portal.yourdomain.com
service: http://portal.portal.svc.cluster.local:80
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tcpKeepAlive: 30s
keepAliveConnections: 100
keepAliveTimeout: 90s
# Rancher
- hostname: rancher.yourdomain.com
service: http://rancher.rancher-system.svc.cluster.local:80
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
# ArgoCD
- hostname: argocd.yourdomain.com
service: http://argocd-server.argocd.svc.cluster.local:80
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
# Grafana
- hostname: grafana.yourdomain.com
service: http://kube-prometheus-stack-grafana.monitoring.svc.cluster.local:80
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
# Vault
- hostname: vault.yourdomain.com
service: http://vault.vault.svc.cluster.local:8200
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
# Keycloak
- hostname: keycloak.yourdomain.com
service: http://keycloak.keycloak.svc.cluster.local:8080
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
# Kubernetes API (restricted)
- hostname: k8s-api.yourdomain.com
service: https://kubernetes.default.svc.cluster.local:443
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: false
# Catch-all rule (must be last)
- service: http_status:404
# Logging
loglevel: info
logfile: /var/log/cloudflared/control-plane-tunnel.log
# Metrics
metrics: 0.0.0.0:9090
# Health check
health-probe:
enabled: true
path: /health
port: 8080

View File

@@ -0,0 +1,70 @@
# Cloudflare Tunnel Configuration for Proxmox Site 1 (US-East)
# This tunnel connects Proxmox cluster to Cloudflare
tunnel: proxmox-site-1-tunnel
credentials-file: /etc/cloudflared/proxmox-site-1-tunnel.json
ingress:
# Proxmox Web UI
- hostname: pve1.yourdomain.com
service: https://pve1.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve1.local:8006
# Proxmox API
- hostname: pve1-api.yourdomain.com
service: https://pve1.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve1.local:8006
# Proxmox Node 2
- hostname: pve2.yourdomain.com
service: https://pve2.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve2.local:8006
# Proxmox Node 3
- hostname: pve3.yourdomain.com
service: https://pve3.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve3.local:8006
# Prometheus Exporter
- hostname: pve1-metrics.yourdomain.com
service: http://localhost:9221
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
# Catch-all rule (must be last)
- service: http_status:404
# Logging
loglevel: info
logfile: /var/log/cloudflared/proxmox-site-1-tunnel.log
# Metrics
metrics: 0.0.0.0:9091
# Health check
health-probe:
enabled: true
path: /health
port: 8080

View File

@@ -0,0 +1,70 @@
# Cloudflare Tunnel Configuration for Proxmox Site 2 (EU-West)
# This tunnel connects Proxmox cluster to Cloudflare
tunnel: proxmox-site-2-tunnel
credentials-file: /etc/cloudflared/proxmox-site-2-tunnel.json
ingress:
# Proxmox Web UI
- hostname: pve4.yourdomain.com
service: https://pve4.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve4.local:8006
# Proxmox API
- hostname: pve4-api.yourdomain.com
service: https://pve4.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve4.local:8006
# Proxmox Node 2
- hostname: pve5.yourdomain.com
service: https://pve5.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve5.local:8006
# Proxmox Node 3
- hostname: pve6.yourdomain.com
service: https://pve6.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve6.local:8006
# Prometheus Exporter
- hostname: pve4-metrics.yourdomain.com
service: http://localhost:9221
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
# Catch-all rule (must be last)
- service: http_status:404
# Logging
loglevel: info
logfile: /var/log/cloudflared/proxmox-site-2-tunnel.log
# Metrics
metrics: 0.0.0.0:9092
# Health check
health-probe:
enabled: true
path: /health
port: 8080

View File

@@ -0,0 +1,60 @@
# Cloudflare Tunnel Configuration for Proxmox Site 3 (APAC)
# This tunnel connects Proxmox cluster to Cloudflare
tunnel: proxmox-site-3-tunnel
credentials-file: /etc/cloudflared/proxmox-site-3-tunnel.json
ingress:
# Proxmox Web UI
- hostname: pve7.yourdomain.com
service: https://pve7.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve7.local:8006
# Proxmox API
- hostname: pve7-api.yourdomain.com
service: https://pve7.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve7.local:8006
# Proxmox Node 2
- hostname: pve8.yourdomain.com
service: https://pve8.local:8006
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
tls:
skipVerify: true
httpHostHeader: pve8.local:8006
# Prometheus Exporter
- hostname: pve7-metrics.yourdomain.com
service: http://localhost:9221
originRequest:
noHappyEyeballs: true
connectTimeout: 30s
# Catch-all rule (must be last)
- service: http_status:404
# Logging
loglevel: info
logfile: /var/log/cloudflared/proxmox-site-3-tunnel.log
# Metrics
metrics: 0.0.0.0:9093
# Health check
health-probe:
enabled: true
path: /health
port: 8080

129
cloudflare/warp-config.json Normal file
View File

@@ -0,0 +1,129 @@
{
"organization": {
"name": "Your Organization",
"auth_domain": "yourdomain.com"
},
"enrollment": {
"enabled": true,
"mode": "automatic",
"require_mfa": true,
"device_posture_checks": [
"managed_device",
"os_version",
"disk_encryption"
]
},
"policies": [
{
"name": "Default WARP Policy",
"description": "Default policy for all WARP devices",
"rules": [
{
"action": "allow",
"match": "any",
"identity": {
"groups": [
{
"name": "employees"
}
]
}
}
]
},
{
"name": "Admin WARP Policy",
"description": "Enhanced access for administrators",
"rules": [
{
"action": "allow",
"match": "any",
"identity": {
"groups": [
{
"name": "admins"
},
{
"name": "platform-engineers"
}
]
},
"require": [
{
"mfa": {}
}
]
}
]
}
],
"device_posture": {
"checks": [
{
"name": "managed_device",
"type": "os_version",
"enabled": true,
"rules": [
{
"os": "windows",
"min_version": "10.0.19041"
},
{
"os": "macos",
"min_version": "11.0"
},
{
"os": "linux",
"min_version": "5.4"
}
]
},
{
"name": "disk_encryption",
"type": "disk_encryption",
"enabled": true,
"require": true
},
{
"name": "firewall_enabled",
"type": "firewall",
"enabled": true,
"require": true
}
]
},
"settings": {
"gateway_proxy": {
"enabled": true,
"tcp_port": 4000,
"udp_port": 4001
},
"split_tunnels": {
"enabled": true,
"exclude": [
"*.yourdomain.com",
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16"
]
},
"dns": {
"servers": [
"1.1.1.1",
"1.0.0.1"
],
"split_dns": [
{
"domains": [
"yourdomain.com",
"*.yourdomain.com"
],
"servers": [
"10.0.0.53"
]
}
]
}
}
}

31
crossplane-provider-proxmox/.gitignore vendored Normal file
View File

@@ -0,0 +1,31 @@
# Binaries
bin/
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary
*.test
# Output of the go coverage tool
*.out
# Dependency directories
vendor/
# Go workspace file
go.work
# IDE
.idea/
.vscode/
*.swp
*.swo
*~
# Build artifacts
dist/
build/

View File

@@ -0,0 +1,27 @@
FROM golang:1.21-alpine AS builder
WORKDIR /workspace
# Copy go mod files
COPY go.mod go.mod
COPY go.sum go.sum
# Download dependencies
RUN go mod download
# Copy source code
COPY . .
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o provider ./cmd/provider
FROM alpine:latest
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /workspace/provider .
ENTRYPOINT ["./provider"]

View File

@@ -0,0 +1,35 @@
.PHONY: build
build:
go build -o bin/provider ./cmd/provider
.PHONY: test
test:
go test ./...
.PHONY: lint
lint:
golangci-lint run
.PHONY: generate
generate:
controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
controller-gen crd:allowDangerousTypes=true paths="./..." output:dir=./config/crd/bases
.PHONY: install
install: generate
kubectl apply -f config/crd/bases/
kubectl apply -f config/provider.yaml
.PHONY: docker-build
docker-build:
docker build -t yourregistry/crossplane-provider-proxmox:latest .
.PHONY: docker-push
docker-push: docker-build
docker push yourregistry/crossplane-provider-proxmox:latest
.PHONY: clean
clean:
rm -rf bin/
rm -rf config/crd/bases/*.yaml

View File

@@ -0,0 +1,174 @@
# Crossplane Provider for Proxmox
A custom Crossplane provider that enables provisioning and management of Proxmox VE resources through Kubernetes.
## Features
- **Virtual Machine Management**: Create, update, delete VMs
- **Storage Management**: Manage storage pools and volumes
- **Network Management**: Configure network bridges and interfaces
- **Multi-Site Support**: Manage multiple Proxmox clusters
- **Status Reporting**: Real-time VM status and IP addresses
- **Reconciliation**: Automatic drift detection and correction
- **Retry Logic**: Automatic retry for transient failures
- **Error Handling**: Comprehensive error handling and reporting
## Architecture
```
crossplane-provider-proxmox/
├── apis/ # CRD API definitions
│ └── v1alpha1/ # API version
├── pkg/ # Provider implementation
│ ├── controller/ # Crossplane controllers
│ ├── proxmox/ # Proxmox API client
│ └── managed/ # Managed resource types
├── config/ # Deployment manifests
│ └── crd/ # CRD definitions
└── examples/ # Usage examples
```
## Installation
### Prerequisites
- Kubernetes cluster with Crossplane installed
- Proxmox VE cluster with API access
- Go 1.21+ for building
### Build and Install
```bash
# Build the provider
make build
# Install CRDs
kubectl apply -f config/crd/bases/
# Deploy the provider
kubectl apply -f config/provider.yaml
# Create ProviderConfig
kubectl apply -f examples/provider-config.yaml
```
## Configuration
### Module Path
**IMPORTANT**: Before building, update the module path in `go.mod`:
```go
module github.com/yourorg/crossplane-provider-proxmox
```
Replace `github.com/yourorg` with your actual GitHub organization or module path.
### Provider Configuration
```yaml
apiVersion: proxmox.yourorg.io/v1alpha1
kind: ProviderConfig
metadata:
name: proxmox-provider-config
spec:
credentials:
source: Secret
secretRef:
name: proxmox-credentials
namespace: crossplane-system
key: credentials.json
sites:
- name: us-east-1
endpoint: https://pve1.yourdomain.com:8006
node: pve1
- name: eu-west-1
endpoint: https://pve4.yourdomain.com:8006
node: pve4
```
### Create a Virtual Machine
```yaml
apiVersion: proxmox.yourorg.io/v1alpha1
kind: ProxmoxVM
metadata:
name: web-server-01
spec:
forProvider:
node: pve1
name: web-server-01
cpu: 4
memory: 8Gi
disk: 100Gi
storage: local-lvm
network: vmbr0
image: ubuntu-22.04-cloud
site: us-east-1
providerConfigRef:
name: proxmox-provider-config
```
## API Reference
### ProxmoxVM
Manages a Proxmox virtual machine.
**Spec:**
- `node`: Proxmox node to deploy on
- `name`: VM name
- `cpu`: Number of CPU cores
- `memory`: Memory size (e.g., "8Gi")
- `disk`: Disk size (e.g., "100Gi")
- `storage`: Storage pool name
- `network`: Network bridge
- `image`: OS template/image
- `site`: Site identifier
**Status:**
- `vmId`: Proxmox VM ID
- `state`: VM state (running, stopped, etc.)
- `ipAddress`: VM IP address
- `conditions`: Resource conditions
## Error Handling and Retry Logic
The provider includes automatic retry logic for transient failures:
- **Network Errors**: Automatically retried with exponential backoff
- **Temporary Errors**: 502/503 errors are retried
- **Max Retries**: Configurable (default: 3)
- **Backoff**: Exponential with jitter, max 30 seconds
## Development
### Building
```bash
go mod download
go build -o bin/provider ./cmd/provider
```
### Testing
```bash
go test ./...
go test -v -race -coverprofile=coverage.out ./...
```
### Running Locally
```bash
# Set up local development environment
export PROXMOX_ENDPOINT=https://pve1.local:8006
export PROXMOX_USERNAME=root@pam
export PROXMOX_PASSWORD=your-password
# Run the provider
./bin/provider
```
## License
Apache 2.0

View File

@@ -0,0 +1,18 @@
// Package v1alpha1 contains the v1alpha1 API types for the
// proxmox.yourorg.io API group used by the Crossplane Proxmox provider.
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "proxmox.yourorg.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

View File

@@ -0,0 +1,82 @@
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ProviderConfigSpec defines the desired state of ProviderConfig
type ProviderConfigSpec struct {
	// Credentials required to authenticate to Proxmox
	Credentials CredentialsSource `json:"credentials"`
	// Sites is a list of Proxmox sites/clusters
	Sites []ProxmoxSite `json:"sites,omitempty"`
}

// CredentialsSource specifies how credentials should be provided
type CredentialsSource struct {
	// Source is the source of credentials (Secret, InjectedIdentity, etc.)
	Source string `json:"source"`
	// SecretRef references a secret containing credentials
	// (presumably only consulted when Source == "Secret" — confirm in the
	// controller that consumes this config).
	SecretRef *SecretKeySelector `json:"secretRef,omitempty"`
}

// SecretKeySelector selects a key from a secret
type SecretKeySelector struct {
	// Name of the secret
	Name string `json:"name"`
	// Namespace of the secret
	Namespace string `json:"namespace"`
	// Key in the secret
	Key string `json:"key"`
}

// ProxmoxSite defines a Proxmox cluster/site
type ProxmoxSite struct {
	// Name is the site identifier
	Name string `json:"name"`
	// Endpoint is the Proxmox API endpoint
	Endpoint string `json:"endpoint"`
	// Node is the default node for this site
	Node string `json:"node,omitempty"`
	// InsecureSkipTLSVerify skips TLS verification
	InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty"`
}

// ProviderConfigStatus defines the observed state of ProviderConfig
type ProviderConfigStatus struct {
	// Conditions represent the latest available observations
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// ProviderConfig is the Schema for the providerconfigs API
type ProviderConfig struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ProviderConfigSpec   `json:"spec,omitempty"`
	Status ProviderConfigStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// ProviderConfigList contains a list of ProviderConfig
type ProviderConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ProviderConfig `json:"items"`
}

// init registers the ProviderConfig types with the package SchemeBuilder so
// AddToScheme can install them into a runtime.Scheme.
func init() {
	SchemeBuilder.Register(&ProviderConfig{}, &ProviderConfigList{})
}

View File

@@ -0,0 +1,114 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ProxmoxVMParameters define the desired state of a Proxmox virtual machine
type ProxmoxVMParameters struct {
// Node is the Proxmox node to deploy the VM on
// +kubebuilder:validation:Required
Node string `json:"node"`
// Name is the name of the virtual machine
// +kubebuilder:validation:Required
Name string `json:"name"`
// CPU is the number of CPU cores
// +kubebuilder:validation:Minimum=1
// +kubebuilder:default=2
CPU int `json:"cpu,omitempty"`
// Memory is the amount of memory (e.g., "8Gi", "4096")
// +kubebuilder:validation:Required
Memory string `json:"memory"`
// Disk is the disk size (e.g., "100Gi", "50")
// +kubebuilder:validation:Required
Disk string `json:"disk"`
// Storage is the storage pool name
// +kubebuilder:default="local-lvm"
Storage string `json:"storage,omitempty"`
// Network is the network bridge name
// +kubebuilder:default="vmbr0"
Network string `json:"network,omitempty"`
// Image is the OS template/image name
// +kubebuilder:validation:Required
Image string `json:"image"`
// Site is the Proxmox site identifier
// +kubebuilder:validation:Required
Site string `json:"site"`
// CloudInitUserData is optional cloud-init user data
UserData string `json:"userData,omitempty"`
// SSHKeys is a list of SSH public keys to inject
SSHKeys []string `json:"sshKeys,omitempty"`
}
// ProxmoxVMStatus defines the observed state of ProxmoxVM, populated by the
// controller from the Proxmox API.
type ProxmoxVMStatus struct {
	// VMID is the Proxmox VM ID; zero means the VM has not been created yet
	// (the controller uses VMID == 0 as the "create" trigger).
	VMID int `json:"vmId,omitempty"`
	// State is the current state of the VM (running, stopped, etc.)
	State string `json:"state,omitempty"`
	// IPAddress is the IP address of the VM
	IPAddress string `json:"ipAddress,omitempty"`
	// Conditions represent the latest available observations of the resource's state
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="VMID",type="integer",JSONPath=".status.vmId"
// +kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state"
// +kubebuilder:printcolumn:name="IP",type="string",JSONPath=".status.ipAddress"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"

// ProxmoxVM is the Schema for the proxmoxvms API: a managed resource whose
// Spec describes a desired Proxmox virtual machine and whose Status reflects
// what the controller observed on the cluster.
type ProxmoxVM struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              ProxmoxVMSpec   `json:"spec,omitempty"`
	Status            ProxmoxVMStatus `json:"status,omitempty"`
}
// ProxmoxVMSpec defines the desired state of ProxmoxVM.
type ProxmoxVMSpec struct {
	// ForProvider are the parameters for the Proxmox VM
	ForProvider ProxmoxVMParameters `json:"forProvider"`
	// ProviderConfigReference specifies how the provider that will be used
	// to create, observe, update, and delete this managed resource should
	// be configured. Note this is a pointer field: literals must take its
	// address (&ProviderConfigReference{...}).
	// +kubebuilder:validation:Required
	ProviderConfigReference *ProviderConfigReference `json:"providerConfigRef"`
}
// ProviderConfigReference specifies how the provider should be configured by
// naming a cluster-scoped ProviderConfig object.
type ProviderConfigReference struct {
	// Name of the referenced ProviderConfig
	Name string `json:"name"`
}
// +kubebuilder:object:root=true

// ProxmoxVMList contains a list of ProxmoxVM objects, as returned by List
// calls against the API server.
type ProxmoxVMList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the ProxmoxVM objects in this list.
	Items []ProxmoxVM `json:"items"`
}
// init registers the ProxmoxVM kinds with the package's SchemeBuilder so they
// can be added to a runtime.Scheme at startup.
func init() {
	SchemeBuilder.Register(&ProxmoxVM{}, &ProxmoxVMList{})
}

View File

@@ -0,0 +1,84 @@
package main
import (
"context"
"flag"
"os"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/runtime/util"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
proxmoxv1alpha1 "github.com/yourorg/crossplane-provider-proxmox/apis/v1alpha1"
"github.com/yourorg/crossplane-provider-proxmox/pkg/controller/virtualmachine"
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(proxmoxv1alpha1.AddToScheme(scheme))
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "crossplane-provider-proxmox.yourorg.io",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err = (&virtualmachine.ProxmoxVMReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ProxmoxVM")
os.Exit(1)
}
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}

View File

@@ -0,0 +1,90 @@
# Namespace hosting Crossplane and this provider's controller.
apiVersion: v1
kind: Namespace
metadata:
  name: crossplane-system
---
# Controller Deployment for the Proxmox Crossplane provider.
# Ports and probe paths match the defaults wired up in cmd main.go
# (:8080 metrics, :8081 health probes, /healthz, /readyz).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: crossplane-provider-proxmox
  namespace: crossplane-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: crossplane-provider-proxmox
  template:
    metadata:
      labels:
        app: crossplane-provider-proxmox
    spec:
      serviceAccountName: crossplane-provider-proxmox
      containers:
        - name: provider
          # NOTE(review): pin an immutable version tag or digest — ":latest"
          # combined with imagePullPolicy: Always makes rollouts
          # non-reproducible.
          image: yourregistry/crossplane-provider-proxmox:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
              name: metrics
            - containerPort: 8081
              name: health
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 500m
              memory: 512Mi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8081
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /readyz
              port: 8081
            initialDelaySeconds: 10
            periodSeconds: 5
---
# Identity the controller pod runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: crossplane-provider-proxmox
  namespace: crossplane-system
---
# Cluster-wide permissions: read Secrets (Proxmox credentials referenced by
# ProviderConfig) and full control over this provider's own CRDs.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: crossplane-provider-proxmox
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["proxmox.yourorg.io"]
    resources: ["*"]
    verbs: ["*"]
---
# Binds the ClusterRole above to the controller's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: crossplane-provider-proxmox
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: crossplane-provider-proxmox
subjects:
  - kind: ServiceAccount
    name: crossplane-provider-proxmox
    namespace: crossplane-system

View File

@@ -0,0 +1,38 @@
# Proxmox API credentials consumed by the provider.
#
# WARNING(review): this manifest embeds a plaintext password placeholder.
# Never commit real credentials to version control — create this Secret
# out-of-band or via a secret manager (SealedSecrets, External Secrets,
# SOPS) and replace the placeholder before applying.
apiVersion: v1
kind: Secret
metadata:
  name: proxmox-credentials
  namespace: crossplane-system
type: Opaque
stringData:
  # JSON blob located via ProviderConfig spec.credentials.secretRef.key.
  credentials.json: |
    {
      "username": "root@pam",
      "password": "your-proxmox-password"
    }
---
# ProviderConfig mapping logical site names to Proxmox API endpoints.
# Each site name is what ProxmoxVM resources reference via
# spec.forProvider.site.
apiVersion: proxmox.yourorg.io/v1alpha1
kind: ProviderConfig
metadata:
  name: proxmox-provider-config
spec:
  credentials:
    source: Secret
    secretRef:
      name: proxmox-credentials
      namespace: crossplane-system
      key: credentials.json
  sites:
    - name: us-east-1
      endpoint: https://pve1.yourdomain.com:8006
      node: pve1
      insecureSkipTLSVerify: false
    - name: eu-west-1
      endpoint: https://pve4.yourdomain.com:8006
      node: pve4
      insecureSkipTLSVerify: false
    - name: apac-1
      endpoint: https://pve7.yourdomain.com:8006
      node: pve7
      insecureSkipTLSVerify: false

View File

@@ -0,0 +1,26 @@
# Example ProxmoxVM managed resource: a 4-vCPU / 8Gi Ubuntu VM on node pve1
# at site "us-east-1", provisioned through the "proxmox-provider-config"
# ProviderConfig defined alongside the credentials Secret.
apiVersion: proxmox.yourorg.io/v1alpha1
kind: ProxmoxVM
metadata:
  name: web-server-01
spec:
  forProvider:
    node: pve1
    name: web-server-01
    cpu: 4
    # memory and disk are string quantities in the CRD schema.
    memory: 8Gi
    disk: 100Gi
    storage: local-lvm
    network: vmbr0
    image: ubuntu-22.04-cloud
    # Must match a site name declared in the referenced ProviderConfig.
    site: us-east-1
    # Cloud-init user data, passed through verbatim.
    userData: |
      #cloud-config
      users:
        - name: admin
          ssh-authorized-keys:
            - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...
    sshKeys:
      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...
  providerConfigRef:
    name: proxmox-provider-config

View File

@@ -0,0 +1,72 @@
module github.com/yourorg/crossplane-provider-proxmox
go 1.21
require (
github.com/crossplane/crossplane-runtime v1.14.0
github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79
github.com/google/go-cmp v0.6.0
github.com/pkg/errors v0.9.1
k8s.io/apimachinery v0.28.0
k8s.io/client-go v0.28.0
sigs.k8s.io/controller-runtime v0.16.0
sigs.k8s.io/controller-tools v0.13.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/zapr v1.2.4 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.25.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.13.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.28.0 // indirect
k8s.io/apiextensions-apiserver v0.28.0 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

View File

@@ -0,0 +1,183 @@
package virtualmachine
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
proxmoxv1alpha1 "github.com/yourorg/crossplane-provider-proxmox/apis/v1alpha1"
"github.com/yourorg/crossplane-provider-proxmox/pkg/proxmox"
)
// ProxmoxVMReconciler reconciles a ProxmoxVM object against the Proxmox API.
type ProxmoxVMReconciler struct {
	// Client is the controller-runtime client used to read and write
	// Kubernetes objects (ProxmoxVM, ProviderConfig).
	client.Client
	// Scheme maps Go types to GroupVersionKinds.
	Scheme *runtime.Scheme
}
//+kubebuilder:rbac:groups=proxmox.yourorg.io,resources=proxmoxvms,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=proxmox.yourorg.io,resources=proxmoxvms/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=proxmox.yourorg.io,resources=proxmoxvms/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop. It fetches
// the ProxmoxVM, resolves its ProviderConfig, credentials and site, then
// either creates the VM (when Status.VMID is still zero) or refreshes its
// status and applies CPU/memory changes.
//
// NOTE(review): no finalizer is registered, so deleting a ProxmoxVM object
// leaves the backing Proxmox VM running — confirm whether deletion handling
// is planned.
// NOTE(review): several calls below do not match pkg/proxmox as written in
// this repo — NewClient is invoked with four arguments and two returns,
// proxmox.VMConfig / VM.IPAddress / VM.Config / GetVM are referenced —
// verify the client package these calls were written against.
// NOTE(review): controller-runtime ignores Result.RequeueAfter when a
// non-nil error is returned, so the RequeueAfter values paired with errors
// here have no effect.
func (r *ProxmoxVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	var vm proxmoxv1alpha1.ProxmoxVM
	if err := r.Get(ctx, req.NamespacedName, &vm); err != nil {
		// Not-found means the object was deleted; nothing to reconcile.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Get ProviderConfig (cluster-scoped lookup by name).
	var providerConfig proxmoxv1alpha1.ProviderConfig
	providerConfigName := vm.Spec.ProviderConfigReference.Name
	if err := r.Get(ctx, client.ObjectKey{Name: providerConfigName}, &providerConfig); err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "cannot get provider config %s", providerConfigName)
	}

	// Get credentials from secret
	creds, err := r.getCredentials(ctx, &providerConfig)
	if err != nil {
		logger.Error(err, "cannot get credentials")
		return ctrl.Result{RequeueAfter: 30 * time.Second}, errors.Wrap(err, "cannot get credentials")
	}

	// Find the site configuration matching spec.forProvider.site.
	site, err := r.findSite(&providerConfig, vm.Spec.ForProvider.Site)
	if err != nil {
		logger.Error(err, "cannot find site", "site", vm.Spec.ForProvider.Site)
		return ctrl.Result{RequeueAfter: 30 * time.Second}, errors.Wrapf(err, "cannot find site %s", vm.Spec.ForProvider.Site)
	}

	// Create Proxmox client for the resolved site endpoint.
	proxmoxClient, err := proxmox.NewClient(
		site.Endpoint,
		creds.Username,
		creds.Password,
		site.InsecureSkipTLSVerify,
	)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err, "cannot create Proxmox client")
	}

	// Reconcile VM: a zero VMID in status means the VM was never created.
	if vm.Status.VMID == 0 {
		// Create VM
		logger.Info("Creating VM", "name", vm.Name, "node", vm.Spec.ForProvider.Node)
		vmConfig := proxmox.VMConfig{
			Name:     vm.Spec.ForProvider.Name,
			CPU:      vm.Spec.ForProvider.CPU,
			Memory:   vm.Spec.ForProvider.Memory,
			Disk:     vm.Spec.ForProvider.Disk,
			Storage:  vm.Spec.ForProvider.Storage,
			Network:  vm.Spec.ForProvider.Network,
			Image:    vm.Spec.ForProvider.Image,
			UserData: vm.Spec.ForProvider.UserData,
			SSHKeys:  vm.Spec.ForProvider.SSHKeys,
		}
		createdVM, err := proxmoxClient.CreateVM(vm.Spec.ForProvider.Node, vmConfig)
		if err != nil {
			return ctrl.Result{}, errors.Wrap(err, "cannot create VM")
		}
		// Record the assigned VMID so subsequent reconciles take the
		// update path instead of re-creating.
		vm.Status.VMID = createdVM.ID
		vm.Status.State = createdVM.Status
		vm.Status.IPAddress = createdVM.IPAddress
		if err := r.Status().Update(ctx, &vm); err != nil {
			return ctrl.Result{}, errors.Wrap(err, "cannot update VM status")
		}
		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
	}

	// Update VM if needed: first refresh observed state from Proxmox.
	currentVM, err := proxmoxClient.GetVM(vm.Spec.ForProvider.Node, vm.Status.VMID)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err, "cannot get VM")
	}

	// Update status
	vm.Status.State = currentVM.Status
	vm.Status.IPAddress = currentVM.IPAddress
	if err := r.Status().Update(ctx, &vm); err != nil {
		return ctrl.Result{}, errors.Wrap(err, "cannot update VM status")
	}

	// Check if VM needs to be updated: only CPU and memory drift is
	// detected; disk/network/image changes are not reconciled.
	needsUpdate := false
	if vm.Spec.ForProvider.CPU != 0 && currentVM.Config.CPU != vm.Spec.ForProvider.CPU {
		needsUpdate = true
	}
	if vm.Spec.ForProvider.Memory != "" && currentVM.Config.Memory != vm.Spec.ForProvider.Memory {
		needsUpdate = true
	}
	if needsUpdate {
		logger.Info("Updating VM", "name", vm.Name, "vmId", vm.Status.VMID)
		vmConfig := proxmox.VMConfig{
			CPU:    vm.Spec.ForProvider.CPU,
			Memory: vm.Spec.ForProvider.Memory,
		}
		if err := proxmoxClient.UpdateVM(vm.Spec.ForProvider.Node, vm.Status.VMID, vmConfig); err != nil {
			return ctrl.Result{}, errors.Wrap(err, "cannot update VM")
		}
		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
	}

	// Steady state: poll again in 30 seconds.
	return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
}
// SetupWithManager sets up the controller with the Manager, watching
// ProxmoxVM objects (no owned secondary resources are watched).
func (r *ProxmoxVMReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&proxmoxv1alpha1.ProxmoxVM{}).
		Complete(r)
}
// Helper functions

// credentials holds a Proxmox username/password pair.
type credentials struct {
	Username string
	Password string
}

// getCredentials resolves the Proxmox credentials referenced by the given
// ProviderConfig.
//
// WARNING(review): this is a placeholder — it only verifies that a secret
// reference exists and then returns a hard-coded username with the literal
// password "placeholder". It must be replaced with code that fetches the
// Secret named by spec.credentials.secretRef (name/namespace/key) and parses
// the JSON credentials before this provider is used against a real cluster.
//
// Fix relative to the original: the original assigned SecretRef to a local
// variable that was never used, which is a compile error in Go; the dead
// assignment has been removed.
func (r *ProxmoxVMReconciler) getCredentials(ctx context.Context, config *proxmoxv1alpha1.ProviderConfig) (*credentials, error) {
	if config.Spec.Credentials.SecretRef == nil {
		return nil, fmt.Errorf("no secret reference in provider config")
	}
	// TODO: fetch config.Spec.Credentials.SecretRef from Kubernetes, parse
	// the JSON payload, and return the real credentials.
	return &credentials{
		Username: "root@pam",
		Password: "placeholder",
	}, nil
}
// findSite returns the site entry in the ProviderConfig whose Name equals
// siteName, or an error when no such site exists. The returned pointer
// refers to a copy of the site, not to the slice element itself.
func (r *ProxmoxVMReconciler) findSite(config *proxmoxv1alpha1.ProviderConfig, siteName string) (*proxmoxv1alpha1.ProxmoxSite, error) {
	for i := range config.Spec.Sites {
		if config.Spec.Sites[i].Name != siteName {
			continue
		}
		match := config.Spec.Sites[i]
		return &match, nil
	}
	return nil, fmt.Errorf("site %s not found", siteName)
}

View File

@@ -0,0 +1,122 @@
package virtualmachine
import (
"context"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
ctrl "sigs.k8s.io/controller-runtime"
proxmoxv1alpha1 "github.com/yourorg/crossplane-provider-proxmox/apis/v1alpha1"
)
// TestProxmoxVMReconciler_Reconcile exercises the Reconcile entry point with
// a fake client.
//
// Fixes relative to the original:
//   - ProxmoxVMSpec.ProviderConfigReference is a *ProviderConfigReference,
//     so the composite literal needs an address-of; the original value
//     literal did not compile.
//   - The referenced ProviderConfig is never created in the fake client, so
//     Reconcile must fail when fetching it — wantErr is therefore true
//     (the original expected success, which cannot happen here).
func TestProxmoxVMReconciler_Reconcile(t *testing.T) {
	tests := []struct {
		name    string
		vm      *proxmoxv1alpha1.ProxmoxVM
		wantErr bool
	}{
		{
			name: "missing provider config returns error",
			vm: &proxmoxv1alpha1.ProxmoxVM{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-vm",
					Namespace: "default",
				},
				Spec: proxmoxv1alpha1.ProxmoxVMSpec{
					ForProvider: proxmoxv1alpha1.ProxmoxVMParameters{
						Node:    "pve1",
						Name:    "test-vm",
						CPU:     2,
						Memory:  "4Gi",
						Disk:    "50Gi",
						Storage: "local-lvm",
						Network: "vmbr0",
						Image:   "ubuntu-22.04-cloud",
						Site:    "us-east-1",
					},
					ProviderConfigReference: &proxmoxv1alpha1.ProviderConfigReference{
						Name: "test-provider-config",
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			scheme := runtime.NewScheme()
			_ = proxmoxv1alpha1.AddToScheme(scheme)
			// NOTE(review): if a future case reaches Status().Update, the
			// builder will also need WithStatusSubresource(tt.vm) on
			// controller-runtime v0.15+.
			fakeClient := fake.NewClientBuilder().
				WithScheme(scheme).
				WithObjects(tt.vm).
				Build()
			r := &ProxmoxVMReconciler{
				Client: fakeClient,
				Scheme: scheme,
			}
			ctx := context.Background()
			req := ctrl.Request{
				NamespacedName: client.ObjectKeyFromObject(tt.vm),
			}
			_, err := r.Reconcile(ctx, req)
			if (err != nil) != tt.wantErr {
				t.Errorf("Reconcile() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestProxmoxVMReconciler_getCredentials verifies that getCredentials fails
// when the ProviderConfig carries no secret reference (SecretRef is nil).
func TestProxmoxVMReconciler_getCredentials(t *testing.T) {
	tests := []struct {
		name           string
		providerConfig *proxmoxv1alpha1.ProviderConfig
		wantErr        bool
	}{
		{
			// Source: Secret but no SecretRef — the nil check in
			// getCredentials must reject this.
			name: "missing secret reference",
			providerConfig: &proxmoxv1alpha1.ProviderConfig{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-config",
				},
				Spec: proxmoxv1alpha1.ProviderConfigSpec{
					Credentials: proxmoxv1alpha1.ProviderCredentials{
						Source: proxmoxv1alpha1.CredentialsSourceSecret,
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			scheme := runtime.NewScheme()
			_ = proxmoxv1alpha1.AddToScheme(scheme)
			fakeClient := fake.NewClientBuilder().
				WithScheme(scheme).
				Build()
			r := &ProxmoxVMReconciler{
				Client: fakeClient,
				Scheme: scheme,
			}
			ctx := context.Background()
			_, err := r.getCredentials(ctx, tt.providerConfig)
			if (err != nil) != tt.wantErr {
				t.Errorf("getCredentials() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

View File

@@ -0,0 +1,262 @@
package proxmox
import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/pkg/errors"
)
// Client represents a Proxmox API client.
type Client struct {
	endpoint string
	username string
	password string
	// token caches an API ticket once authenticated (not yet used).
	token string
	// insecureSkipTLSVerify disables TLS certificate verification when true.
	insecureSkipTLSVerify bool
}

// NewClient creates a new Proxmox API client.
//
// Consistency fix: the virtualmachine controller in this repo calls
// proxmox.NewClient(endpoint, user, pass, insecure) and expects a
// (*Client, error) pair, so the insecureSkipTLSVerify parameter and the
// error return were added to match. No validation fails today (the error is
// always nil), but the signature leaves room for endpoint/credential checks.
func NewClient(endpoint, username, password string, insecureSkipTLSVerify bool) (*Client, error) {
	return &Client{
		endpoint:              endpoint,
		username:              username,
		password:              password,
		insecureSkipTLSVerify: insecureSkipTLSVerify,
	}, nil
}
// RetryConfig defines retry behavior
type RetryConfig struct {
MaxRetries int
BaseDelay time.Duration
MaxDelay time.Duration
}
// DefaultRetryConfig returns default retry configuration
func DefaultRetryConfig() RetryConfig {
return RetryConfig{
MaxRetries: 3,
BaseDelay: time.Second,
MaxDelay: 30 * time.Second,
}
}
// RetryableError indicates an error that should be retried by Retry.
type RetryableError struct {
	// Err is the underlying failure.
	Err error
	// RetryAfter, when > 0, is an extra delay to honor before the next
	// attempt (e.g. from a server-provided hint).
	RetryAfter time.Duration
}

// Error implements the error interface by delegating to the wrapped error.
func (e *RetryableError) Error() string {
	return e.Err.Error()
}
// IsRetryable reports whether err is (or wraps) a *RetryableError and should
// therefore be retried by Retry.
//
// Generalization: the original used a bare type assertion, which misses a
// RetryableError that has been wrapped (e.g. via errors.Wrap). errors.As
// walks the wrap chain; github.com/pkg/errors v0.9+ (pinned in go.mod)
// re-exports the standard library's errors.As, so the package-level
// "errors" import provides it.
func IsRetryable(err error) bool {
	if err == nil {
		return false
	}
	var re *RetryableError
	return errors.As(err, &re)
}
// Retry executes fn until it succeeds, a non-retryable error occurs, the
// context is cancelled, or config.MaxRetries retries are exhausted, in which
// case the last error is returned wrapped with the retry count.
//
// Backoff doubles from config.BaseDelay each attempt and is capped at
// config.MaxDelay. A *RetryableError carrying RetryAfter > 0 adds that extra
// delay immediately after the failing attempt.
//
// NOTE(review): an attempt failing with RetryAfter > 0 waits twice — once
// for RetryAfter here, then again for the exponential delay at the top of
// the next iteration. Confirm the combined wait is intended.
func Retry(ctx context.Context, fn func() error, config RetryConfig) error {
	var lastErr error
	for attempt := 0; attempt <= config.MaxRetries; attempt++ {
		if attempt > 0 {
			// Exponential backoff: BaseDelay * 2^(attempt-1), capped at MaxDelay.
			delay := config.BaseDelay * time.Duration(1<<uint(attempt-1))
			if delay > config.MaxDelay {
				delay = config.MaxDelay
			}
			// Sleep, but abort promptly on context cancellation.
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(delay):
			}
		}
		err := fn()
		if err == nil {
			return nil
		}
		lastErr = err
		// Non-retryable errors abort immediately.
		if !IsRetryable(err) {
			return err
		}
		if attempt < config.MaxRetries {
			// Honor a caller-specified extra delay, if the error carries one.
			if retryErr, ok := err.(*RetryableError); ok && retryErr.RetryAfter > 0 {
				select {
				case <-ctx.Done():
					return ctx.Err()
				case <-time.After(retryErr.RetryAfter):
				}
			}
		}
	}
	return errors.Wrapf(lastErr, "failed after %d retries", config.MaxRetries)
}
// CreateVM creates a virtual machine, retrying transient failures with the
// default backoff policy. Errors classified as network or temporary (see
// isNetworkError / isTemporaryError) are wrapped in *RetryableError so Retry
// re-attempts them; all other errors abort immediately.
func (c *Client) CreateVM(ctx context.Context, spec VMSpec) (*VM, error) {
	config := DefaultRetryConfig()
	var vm *VM
	err := Retry(ctx, func() error {
		var retryErr error
		vm, retryErr = c.createVM(ctx, spec)
		if retryErr != nil {
			// Check if error is retryable (network errors, temporary failures)
			if isNetworkError(retryErr) || isTemporaryError(retryErr) {
				return &RetryableError{Err: retryErr}
			}
			return retryErr
		}
		return nil
	}, config)
	return vm, err
}

// createVM performs the actual VM creation.
// TODO: Implement actual Proxmox API call — currently always fails with
// "not implemented".
func (c *Client) createVM(ctx context.Context, spec VMSpec) (*VM, error) {
	return nil, fmt.Errorf("not implemented")
}
// UpdateVM updates a virtual machine identified by vmID, retrying transient
// (network/temporary) failures with the default backoff policy.
func (c *Client) UpdateVM(ctx context.Context, vmID int, spec VMSpec) (*VM, error) {
	config := DefaultRetryConfig()
	var vm *VM
	err := Retry(ctx, func() error {
		var retryErr error
		vm, retryErr = c.updateVM(ctx, vmID, spec)
		if retryErr != nil {
			// Only transient classes of failure are retried.
			if isNetworkError(retryErr) || isTemporaryError(retryErr) {
				return &RetryableError{Err: retryErr}
			}
			return retryErr
		}
		return nil
	}, config)
	return vm, err
}

// updateVM performs the actual VM update.
// TODO: Implement actual Proxmox API call — currently always fails with
// "not implemented".
func (c *Client) updateVM(ctx context.Context, vmID int, spec VMSpec) (*VM, error) {
	return nil, fmt.Errorf("not implemented")
}
// DeleteVM deletes the virtual machine identified by vmID, retrying
// transient (network/temporary) failures with the default backoff policy.
func (c *Client) DeleteVM(ctx context.Context, vmID int) error {
	config := DefaultRetryConfig()
	return Retry(ctx, func() error {
		err := c.deleteVM(ctx, vmID)
		if err != nil {
			// Only transient classes of failure are retried.
			if isNetworkError(err) || isTemporaryError(err) {
				return &RetryableError{Err: err}
			}
			return err
		}
		return nil
	}, config)
}

// deleteVM performs the actual VM deletion.
// TODO: Implement actual Proxmox API call — currently always fails with
// "not implemented".
func (c *Client) deleteVM(ctx context.Context, vmID int) error {
	return fmt.Errorf("not implemented")
}
// GetVMStatus gets the status of the virtual machine identified by vmID,
// retrying transient (network/temporary) failures with the default backoff
// policy.
func (c *Client) GetVMStatus(ctx context.Context, vmID int) (*VMStatus, error) {
	config := DefaultRetryConfig()
	var status *VMStatus
	err := Retry(ctx, func() error {
		var retryErr error
		status, retryErr = c.getVMStatus(ctx, vmID)
		if retryErr != nil {
			// Only transient classes of failure are retried.
			if isNetworkError(retryErr) || isTemporaryError(retryErr) {
				return &RetryableError{Err: retryErr}
			}
			return retryErr
		}
		return nil
	}, config)
	return status, err
}

// getVMStatus performs the actual status fetch.
// TODO: Implement actual Proxmox API call — currently always fails with
// "not implemented".
func (c *Client) getVMStatus(ctx context.Context, vmID int) (*VMStatus, error) {
	return nil, fmt.Errorf("not implemented")
}
// Helper functions
func isNetworkError(err error) bool {
if err == nil {
return false
}
errStr := err.Error()
return contains(errStr, "network") || contains(errStr, "timeout") || contains(errStr, "connection")
}
func isTemporaryError(err error) bool {
if err == nil {
return false
}
errStr := err.Error()
return contains(errStr, "temporary") || contains(errStr, "503") || contains(errStr, "502")
}
// contains reports whether substr occurs within s. Kept for backward
// compatibility with existing callers; it now delegates to the standard
// library instead of the hand-rolled scan (identical semantics, including
// the empty-substring case, which always matches).
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}
// indexOfSubstring returns the index of the first occurrence of substr in s,
// or -1 when substr is absent. An empty substr matches at index 0.
func indexOfSubstring(s, substr string) int {
	n := len(substr)
	for start := 0; start+n <= len(s); start++ {
		if s[start:start+n] == substr {
			return start
		}
	}
	return -1
}
// VMSpec represents the desired configuration of a virtual machine.
type VMSpec struct {
	Node    string
	Name    string
	CPU     int
	Memory  string
	Disk    string
	Storage string
	Network string
	Image   string
	// UserData is optional cloud-init user data. Added (backward-compatibly)
	// because the virtualmachine controller populates this field.
	UserData string
	// SSHKeys lists SSH public keys to inject. Added for the same reason.
	SSHKeys []string
}

// VMConfig is an alias for VMSpec. The virtualmachine controller refers to
// this type as proxmox.VMConfig; the alias keeps both names valid without
// duplicating the definition.
type VMConfig = VMSpec

// VM represents a virtual machine as reported by the Proxmox API.
type VM struct {
	ID      int
	Name    string
	Status  string
	IP      string
	Node    string
	Created time.Time
	// IPAddress mirrors IP under the field name the virtualmachine
	// controller reads (createdVM.IPAddress). Added backward-compatibly.
	IPAddress string
	// Config is the VM's current configuration, read by the controller when
	// detecting CPU/memory drift (currentVM.Config). Added
	// backward-compatibly.
	Config VMSpec
}
// VMStatus represents the runtime status of a virtual machine as returned by
// GetVMStatus.
type VMStatus struct {
	// State is the VM's lifecycle state (e.g. running, stopped).
	State string
	// IPAddress is the VM's current IP address.
	IPAddress string
	// CPU is current CPU usage — presumably a 0..1 fraction; TODO confirm
	// against the Proxmox API once implemented.
	CPU float64
	// Memory is current memory usage — presumably bytes; TODO confirm.
	Memory int64
}

Binary file not shown.

75
docs/CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,75 @@
# Contributing to Phoenix Sankofa Cloud
Thank you for your interest in contributing to Phoenix Sankofa Cloud! This document provides guidelines and instructions for contributing.
## Code of Conduct
- Be respectful and inclusive
- Welcome newcomers and help them learn
- Focus on constructive feedback
- Respect different viewpoints and experiences
## Getting Started
1. Fork the repository
2. Clone your fork: `git clone https://github.com/yourusername/Sankofa.git`
3. Create a branch: `git checkout -b feature/your-feature-name`
4. Make your changes
5. Commit your changes: `git commit -m "Add your feature"`
6. Push to your fork: `git push origin feature/your-feature-name`
7. Open a Pull Request
## Development Setup
See [DEVELOPMENT.md](./DEVELOPMENT.md) for detailed setup instructions.
## Pull Request Process
1. Ensure your code follows the project's style guidelines
2. Add tests for new features
3. Ensure all tests pass: `pnpm test`
4. Update documentation as needed
5. Ensure your branch is up to date with the main branch
6. Submit your PR with a clear description
## Coding Standards
### TypeScript/JavaScript
- Use TypeScript for all new code
- Follow the existing code style
- Use meaningful variable and function names
- Add JSDoc comments for public APIs
- Avoid `any` types - use proper typing
### React Components
- Use functional components with hooks
- Keep components small and focused
- Extract reusable logic into custom hooks
- Use proper prop types or TypeScript interfaces
### Git Commits
- Use clear, descriptive commit messages
- Follow conventional commits format when possible
- Keep commits focused on a single change
## Testing
- Write tests for all new features
- Ensure existing tests still pass
- Aim for >80% code coverage
- Test both success and error cases
## Documentation
- Update README.md if needed
- Add JSDoc comments for new functions
- Update API documentation for backend changes
- Keep architecture docs up to date
## Questions?
Feel free to open an issue for questions or reach out to the maintainers.

182
docs/DEVELOPMENT.md Normal file
View File

@@ -0,0 +1,182 @@
# Development Guide
This guide will help you set up your development environment for Phoenix Sankofa Cloud.
## Prerequisites
- Node.js 18+ and pnpm (or npm/yarn)
- PostgreSQL 14+ (for API)
- Go 1.21+ (for Crossplane provider)
- Docker (optional, for local services)
## Initial Setup
### 1. Clone the Repository
```bash
git clone https://github.com/yourorg/Sankofa.git
cd Sankofa
```
### 2. Install Dependencies
```bash
# Main application
pnpm install
# Portal
cd portal
npm install
cd ..
# API
cd api
npm install
cd ..
# Crossplane Provider
cd crossplane-provider-proxmox
go mod download
cd ..
```
### 3. Set Up Environment Variables
Create `.env.local` files:
```bash
# Root .env.local
cp .env.example .env.local
# Portal .env.local
cd portal
cp .env.example .env.local
cd ..
# API .env.local
cd api
cp .env.example .env.local
cd ..
```
### 4. Set Up Database
```bash
# Create database
createdb sankofa
# Run migrations
cd api
npm run db:migrate
```
## Running the Application
### Development Mode
```bash
# Main app (port 3000)
pnpm dev
# Portal (port 3001)
cd portal
npm run dev
# API (port 4000)
cd api
npm run dev
```
### Running Tests
```bash
# Main app tests
pnpm test
# Portal tests
cd portal
npm test
# Crossplane provider tests
cd crossplane-provider-proxmox
go test ./...
```
## Project Structure
```
Sankofa/
├── src/ # Main Next.js app
├── portal/ # Portal application
├── api/ # GraphQL API server
├── crossplane-provider-proxmox/ # Crossplane provider
├── gitops/ # GitOps configurations
├── cloudflare/ # Cloudflare configs
└── docs/ # Documentation
```
## Common Tasks
### Adding a New Component
1. Create component in `src/components/`
2. Add tests in `src/components/**/*.test.tsx`
3. Export from appropriate index file
4. Update Storybook (if applicable)
### Adding a New API Endpoint
1. Add GraphQL type definition in `api/src/schema/typeDefs.ts`
2. Add resolver in `api/src/schema/resolvers.ts`
3. Add service logic in `api/src/services/`
4. Add tests
### Database Migrations
```bash
cd api
# Create migration
npm run db:migrate:create migration-name
# Run migrations
npm run db:migrate
```
## Debugging
### Frontend
- Use React DevTools
- Check browser console
- Use Next.js debug mode: `NODE_OPTIONS='--inspect' pnpm dev`
### Backend
- Use VS Code debugger
- Check API logs
- Use GraphQL Playground at `http://localhost:4000/graphql`
## Code Quality
### Linting
```bash
pnpm lint
```
### Type Checking
```bash
pnpm type-check
```
### Formatting
```bash
pnpm format
```
## Troubleshooting
See [TROUBLESHOOTING.md](./TROUBLESHOOTING.md) for common issues and solutions.

173
docs/TROUBLESHOOTING.md Normal file
View File

@@ -0,0 +1,173 @@
# Troubleshooting Guide
Common issues and their solutions.
## Installation Issues
### Node Version Mismatch
**Problem**: `Error: The engine "node" is incompatible with this module`
**Solution**: Use Node.js 18+:
```bash
nvm install 20
nvm use 20
```
### pnpm Not Found
**Problem**: `command not found: pnpm`
**Solution**: Install pnpm:
```bash
npm install -g pnpm
```
## Development Issues
### Port Already in Use
**Problem**: `Error: Port 3000 is already in use`
**Solution**:
- Kill the process using the port: `lsof -ti:3000 | xargs kill`
- Or use a different port: `PORT=3001 pnpm dev`
### Database Connection Errors
**Problem**: `Error: connect ECONNREFUSED`
**Solution**:
- Ensure PostgreSQL is running: `pg_isready`
- Check connection string in `.env.local`
- Verify database exists: `psql -l`
### Module Not Found Errors
**Problem**: `Module not found: Can't resolve '@/components/...'`
**Solution**:
- Clear `.next` directory: `rm -rf .next`
- Reinstall dependencies: `pnpm install`
- Restart dev server
## Build Issues
### TypeScript Errors
**Problem**: Type errors during build
**Solution**:
- Run type check: `pnpm type-check`
- Fix type errors
- Ensure all dependencies are installed
### Build Fails with Memory Error
**Problem**: `JavaScript heap out of memory`
**Solution**:
```bash
NODE_OPTIONS="--max-old-space-size=4096" pnpm build
```
## Test Issues
### Tests Fail with "Cannot find module"
**Problem**: Tests can't find modules
**Solution**:
- Clear test cache: `pnpm test --clearCache`
- Reinstall dependencies
- Check `vitest.config.ts` paths
### Coverage Not Generated
**Problem**: Coverage report is empty
**Solution**:
- Ensure coverage provider is installed
- Run: `pnpm test:coverage`
- Check `vitest.config.ts` coverage settings
## API Issues
### GraphQL Schema Errors
**Problem**: Schema validation errors
**Solution**:
- Check `api/src/schema/typeDefs.ts`
- Ensure all types are defined
- Verify resolver return types match schema
### Authentication Errors
**Problem**: `UNAUTHENTICATED` errors
**Solution**:
- Check JWT token in request headers
- Verify token hasn't expired
- Ensure `JWT_SECRET` is set in `.env.local`
## Portal Issues
### Keycloak Connection Errors
**Problem**: Cannot connect to Keycloak
**Solution**:
- Verify Keycloak URL in `.env.local`
- Check network connectivity
- Ensure Keycloak is running
### Crossplane API Errors
**Problem**: Cannot reach Crossplane API
**Solution**:
- Verify `NEXT_PUBLIC_CROSSPLANE_API` is set
- Check if running in Kubernetes context
- Verify API endpoint is accessible
## GitOps Issues
### ArgoCD Sync Failures
**Problem**: ArgoCD applications fail to sync
**Solution**:
- Check ArgoCD logs: `kubectl logs -n argocd deployment/argocd-application-controller`
- Verify Git repository access
- Check application manifests
## Performance Issues
### Slow Build Times
**Solution**:
- Use pnpm instead of npm
- Enable build cache
- Reduce bundle size
### Slow Development Server
**Solution**:
- Clear `.next` directory
- Restart dev server
- Check for large files in `public/`
## Getting Help
If you're still experiencing issues:
1. Check existing GitHub issues
2. Search documentation
3. Ask in discussions
4. Open a new issue with:
- Error message
- Steps to reproduce
- Environment details
- Relevant logs

132
docs/api/README.md Normal file
View File

@@ -0,0 +1,132 @@
# API Documentation
## GraphQL API
The Phoenix Sankofa Cloud API is a GraphQL API built with Apollo Server.
### Endpoint
- Development: `http://localhost:4000/graphql`
- Production: `https://api.sankofa.cloud/graphql`
### Authentication
All queries and mutations (except `login`) require authentication via JWT token:
```http
Authorization: Bearer <token>
```
### Schema
See [schema.graphql](./schema.graphql) for the complete GraphQL schema.
### Queries
#### Get Resources
```graphql
query GetResources($filter: ResourceFilter) {
resources(filter: $filter) {
id
name
type
status
site {
id
name
}
}
}
```
#### Get Sites
```graphql
query GetSites {
sites {
id
name
region
status
}
}
```
#### Get Current User
```graphql
query GetMe {
me {
id
email
name
role
}
}
```
### Mutations
#### Login
```graphql
mutation Login($email: String!, $password: String!) {
login(email: $email, password: $password) {
token
user {
id
email
name
}
}
}
```
#### Create Resource
```graphql
mutation CreateResource($input: CreateResourceInput!) {
createResource(input: $input) {
id
name
type
status
}
}
```
### Error Handling
The API returns errors in the standard GraphQL error format:
```json
{
"errors": [
{
"message": "Authentication required",
"extensions": {
"code": "UNAUTHENTICATED"
}
}
]
}
```
### Error Codes
- `UNAUTHENTICATED`: Authentication required
- `FORBIDDEN`: Insufficient permissions
- `NOT_FOUND`: Resource not found
- `VALIDATION_ERROR`: Input validation failed
- `SERVER_ERROR`: Internal server error
### Rate Limiting
- 100 requests per minute per IP
- 1000 requests per hour per authenticated user
### Examples
See [examples.md](./examples.md) for more usage examples.

109
docs/api/examples.md Normal file
View File

@@ -0,0 +1,109 @@
# API Usage Examples
## Authentication
### Login
```javascript
const LOGIN_MUTATION = gql`
mutation Login($email: String!, $password: String!) {
login(email: $email, password: $password) {
token
user {
id
email
name
}
}
}
`
const { data } = await client.mutate({
mutation: LOGIN_MUTATION,
variables: {
email: 'user@example.com',
password: 'password123'
}
})
// Store token
localStorage.setItem('token', data.login.token)
```
## Resources
### Get All Resources
```javascript
const GET_RESOURCES = gql`
query GetResources {
resources {
id
name
type
status
site {
name
region
}
}
}
`
const { data } = await client.query({
query: GET_RESOURCES
})
```
### Create Resource
```javascript
const CREATE_RESOURCE = gql`
mutation CreateResource($input: CreateResourceInput!) {
createResource(input: $input) {
id
name
type
status
}
}
`
const { data } = await client.mutate({
mutation: CREATE_RESOURCE,
variables: {
input: {
name: 'web-server-01',
type: 'VM',
siteId: 'site-id-here',
metadata: {
cpu: 4,
memory: '8Gi'
}
}
}
})
```
## Using React Hooks
```typescript
import { useResources, useCreateResource } from '@/lib/graphql/hooks'
function ResourcesList() {
const { data, loading, error } = useResources()
const { createResource } = useCreateResource()
if (loading) return <div>Loading...</div>
if (error) return <div>Error: {error.message}</div>
return (
<div>
{data?.resources.map(resource => (
<div key={resource.id}>{resource.name}</div>
))}
</div>
)
}
```

134
docs/api/schema.graphql Normal file
View File

@@ -0,0 +1,134 @@
# GraphQL Schema
```graphql
scalar DateTime
scalar JSON
type Query {
health: HealthStatus
resources(filter: ResourceFilter): [Resource!]!
resource(id: ID!): Resource
sites: [Site!]!
site(id: ID!): Site
me: User
users: [User!]!
user(id: ID!): User
}
type Mutation {
login(email: String!, password: String!): AuthPayload!
logout: Boolean!
createResource(input: CreateResourceInput!): Resource!
updateResource(id: ID!, input: UpdateResourceInput!): Resource!
deleteResource(id: ID!): Boolean!
createUser(input: CreateUserInput!): User!
updateUser(id: ID!, input: UpdateUserInput!): User!
deleteUser(id: ID!): Boolean!
}
type Subscription {
resourceUpdated(id: ID!): Resource!
resourceCreated: Resource!
resourceDeleted(id: ID!): ID!
}
type HealthStatus {
status: String!
timestamp: DateTime!
version: String!
}
type Resource {
id: ID!
name: String!
type: ResourceType!
status: ResourceStatus!
site: Site!
metadata: JSON
createdAt: DateTime!
updatedAt: DateTime!
}
type Site {
id: ID!
name: String!
region: String!
status: SiteStatus!
resources: [Resource!]!
createdAt: DateTime!
updatedAt: DateTime!
}
type User {
id: ID!
email: String!
name: String!
role: UserRole!
createdAt: DateTime!
updatedAt: DateTime!
}
type AuthPayload {
token: String!
user: User!
}
enum ResourceType {
VM
CONTAINER
STORAGE
NETWORK
}
enum ResourceStatus {
PENDING
PROVISIONING
RUNNING
STOPPED
ERROR
DELETING
}
enum SiteStatus {
ACTIVE
INACTIVE
MAINTENANCE
}
enum UserRole {
ADMIN
USER
VIEWER
}
input ResourceFilter {
type: ResourceType
status: ResourceStatus
siteId: ID
}
input CreateResourceInput {
name: String!
type: ResourceType!
siteId: ID!
metadata: JSON
}
input UpdateResourceInput {
name: String
metadata: JSON
}
input CreateUserInput {
email: String!
name: String!
password: String!
role: UserRole
}
input UpdateUserInput {
name: String
role: UserRole
}
```

View File

@@ -0,0 +1,85 @@
# Architecture Diagrams
This directory contains comprehensive architecture diagrams for the Hybrid Cloud Control Plane system.
## Diagrams
### 1. System Overview (`system-overview.svg`)
High-level view of all system components and their relationships:
- Cloudflare Zero Trust Layer
- Custom Portal (Next.js)
- Kubernetes Control Plane
- Observability Stack
- Identity Management (Keycloak)
- GitOps (ArgoCD)
- Proxmox Edge Sites
### 2. Data Flow (`data-flow.svg`)
Detailed data flow diagrams showing:
- **Authentication Flow**: User → Cloudflare Access → Portal → Keycloak
- **VM Provisioning Flow**: User → Portal → Crossplane → ArgoCD → Proxmox Provider → Proxmox Site
- **Monitoring Flow**: Proxmox Node → pve-exporter → Prometheus → Grafana → Portal
- **Log Aggregation Flow**: Services → Promtail → Loki → Portal
- **GitOps Deployment Flow**: Developer → Git Repo → ArgoCD → Kubernetes → Crossplane → Infrastructure
### 3. Network Topology (`network-topology.svg`)
Network architecture showing:
- Internet connectivity
- Cloudflare Global Network (200+ edge locations)
- Control Plane site with all services
- Three Proxmox sites (US-East, EU-West, APAC)
- Cloudflare Tunnels connecting all sites
- Local network addressing (10.0.0.0/16, 10.1.0.0/16, etc.)
- Security features (Zero Trust, no public IPs, encrypted tunnels)
### 4. Deployment Diagram (`deployment-diagram.svg`)
Infrastructure layout showing:
- **Control Plane Cluster**: 3 master nodes, 3 worker nodes, shared storage
- **Proxmox Site 1**: 3 nodes with Ceph storage cluster (200TB)
- **Proxmox Site 2**: 3 nodes with ZFS storage pools (80TB)
- **Proxmox Site 3**: 2 nodes with local storage (40TB)
- **Network Infrastructure**: Load balancers, routers, DNS, NTP, monitoring, backup
- **Cloudflare Tunnel Agents**: Per-site tunnel configurations
## Viewing the Diagrams
These SVG files can be viewed in:
- Web browsers (Chrome, Firefox, Safari, Edge)
- Vector graphics editors (Inkscape, Adobe Illustrator)
- Documentation tools (GitHub, GitLab, Confluence)
- VS Code with SVG preview extensions
## Generating PNG Versions
To convert SVG to PNG for presentations:
```bash
# Using Inkscape
inkscape --export-type=png --export-width=2400 system-overview.svg
# Using ImageMagick
convert -density 300 system-overview.svg system-overview.png
# Using rsvg-convert
rsvg-convert -w 2400 system-overview.svg > system-overview.png
```
## Diagram Maintenance
When updating diagrams:
1. Maintain consistent color scheme
2. Update component labels when architecture changes
3. Keep network addresses and IPs accurate
4. Document new components in this README
5. Export PNG versions for presentations if needed
## Color Scheme
- **Blue (#326CE5)**: Kubernetes components
- **Orange (#F38020)**: Cloudflare services
- **Orange (#E57000)**: Proxmox infrastructure
- **Teal (#00D4AA)**: Portal and UI components
- **Red (#E74C3C)**: Authentication/Identity
- **Gray (#34495E)**: Servers and infrastructure
- **Light Gray (#ECF0F1)**: Network segments

View File

@@ -0,0 +1,225 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1600 800">
<defs>
<style>
.user { fill: #9B59B6; stroke: #7D3C98; stroke-width: 2; }
.cloudflare { fill: #F38020; stroke: #C85F00; }
.portal { fill: #00D4AA; stroke: #00A888; }
.auth { fill: #E74C3C; stroke: #C0392B; }
.k8s { fill: #326CE5; stroke: #1E4A8A; }
.proxmox { fill: #E57000; stroke: #B85900; }
.text { font-family: Arial, sans-serif; font-size: 11px; fill: #333; }
.title { font-size: 14px; font-weight: bold; }
.arrow { stroke: #333; stroke-width: 2; fill: none; marker-end: url(#arrowhead); }
.flow-label { font-size: 10px; fill: #666; }
</style>
<marker id="arrowhead" markerWidth="10" markerHeight="10" refX="9" refY="3" orient="auto">
<polygon points="0 0, 10 3, 0 6" fill="#333" />
</marker>
</defs>
<text x="700" y="30" text-anchor="middle" class="text title">Data Flow Diagrams - Authentication &amp; Provisioning</text>
<!-- Authentication Flow Section -->
<text x="350" y="70" text-anchor="middle" class="text title">1. Authentication Flow</text>
<!-- User -->
<circle cx="100" cy="150" r="40" class="user"/>
<text x="100" y="155" text-anchor="middle" class="text" fill="white">User</text>
<!-- Cloudflare Access -->
<rect x="200" y="120" width="120" height="60" class="cloudflare" rx="5"/>
<text x="260" y="145" text-anchor="middle" class="text" fill="white">Cloudflare</text>
<text x="260" y="165" text-anchor="middle" class="text" fill="white">Access</text>
<!-- Portal -->
<rect x="380" y="120" width="120" height="60" class="portal" rx="5"/>
<text x="440" y="145" text-anchor="middle" class="text" fill="white">Portal</text>
<text x="440" y="165" text-anchor="middle" class="text" fill="white">(Next.js)</text>
<!-- Keycloak -->
<rect x="560" y="120" width="120" height="60" class="auth" rx="5"/>
<text x="620" y="145" text-anchor="middle" class="text" fill="white">Keycloak</text>
<text x="620" y="165" text-anchor="middle" class="text" fill="white">OIDC</text>
<!-- Auth Flow Arrows -->
<line x1="140" y1="150" x2="200" y2="150" class="arrow"/>
<text x="170" y="145" class="flow-label">1. Request</text>
<line x1="320" y1="150" x2="380" y2="150" class="arrow"/>
<text x="350" y="145" class="flow-label">2. Redirect</text>
<line x1="500" y1="150" x2="560" y2="150" class="arrow"/>
<text x="530" y="145" class="flow-label">3. Auth</text>
<line x1="560" y1="180" x2="500" y2="180" class="arrow"/>
<text x="530" y="185" class="flow-label">4. JWT</text>
<line x1="380" y1="180" x2="320" y2="180" class="arrow"/>
<text x="350" y="185" class="flow-label">5. Token</text>
<line x1="200" y1="180" x2="140" y2="180" class="arrow"/>
<text x="170" y="185" class="flow-label">6. Session</text>
<!-- Provisioning Flow Section -->
<text x="1050" y="70" text-anchor="middle" class="text title">2. VM Provisioning Flow</text>
<!-- User Action -->
<circle cx="900" cy="150" r="40" class="user"/>
<text x="900" y="155" text-anchor="middle" class="text" fill="white">User</text>
<!-- Portal -->
<rect x="1000" y="120" width="120" height="60" class="portal" rx="5"/>
<text x="1060" y="145" text-anchor="middle" class="text" fill="white">Portal</text>
<text x="1060" y="165" text-anchor="middle" class="text" fill="white">UI</text>
<!-- Crossplane -->
<rect x="1180" y="120" width="120" height="60" class="k8s" rx="5"/>
<text x="1240" y="145" text-anchor="middle" class="text" fill="white">Crossplane</text>
<text x="1240" y="165" text-anchor="middle" class="text" fill="white">API</text>
<!-- ArgoCD -->
<rect x="1360" y="120" width="120" height="60" class="k8s" rx="5"/>
<text x="1420" y="145" text-anchor="middle" class="text" fill="white">ArgoCD</text>
<text x="1420" y="165" text-anchor="middle" class="text" fill="white">GitOps</text>
<!-- Provisioning Flow Arrows -->
<line x1="940" y1="150" x2="1000" y2="150" class="arrow"/>
<text x="970" y="145" class="flow-label">1. Create VM</text>
<line x1="1120" y1="150" x2="1180" y2="150" class="arrow"/>
<text x="1150" y="145" class="flow-label">2. CRD</text>
<line x1="1300" y1="150" x2="1360" y2="150" class="arrow"/>
<text x="1330" y="145" class="flow-label">3. Sync</text>
<!-- Proxmox Provider -->
<rect x="1180" y="250" width="120" height="60" class="proxmox" rx="5"/>
<text x="1240" y="275" text-anchor="middle" class="text" fill="white">Proxmox</text>
<text x="1240" y="295" text-anchor="middle" class="text" fill="white">Provider</text>
<!-- Proxmox Site -->
<rect x="1360" y="250" width="120" height="60" class="proxmox" rx="5"/>
<text x="1420" y="275" text-anchor="middle" class="text" fill="white">Proxmox</text>
<text x="1420" y="295" text-anchor="middle" class="text" fill="white">Site</text>
<line x1="1240" y1="250" x2="1240" y2="220" class="arrow"/>
<text x="1270" y="235" class="flow-label">4. Reconcile</text>
<line x1="1300" y1="280" x2="1360" y2="280" class="arrow"/>
<text x="1330" y="275" class="flow-label">5. API Call</text>
<line x1="1360" y1="310" x2="1300" y2="310" class="arrow"/>
<text x="1330" y="315" class="flow-label">6. Status</text>
<!-- Monitoring Flow Section -->
<text x="350" y="400" text-anchor="middle" class="text title">3. Monitoring &amp; Observability Flow</text>
<!-- Proxmox Node -->
<rect x="50" y="450" width="120" height="60" class="proxmox" rx="5"/>
<text x="110" y="475" text-anchor="middle" class="text" fill="white">Proxmox</text>
<text x="110" y="495" text-anchor="middle" class="text" fill="white">Node</text>
<!-- Exporter -->
<rect x="220" y="450" width="120" height="60" class="k8s" rx="5"/>
<text x="280" y="475" text-anchor="middle" class="text" fill="white">pve-exporter</text>
<text x="280" y="495" text-anchor="middle" class="text" fill="white">Prometheus</text>
<!-- Prometheus -->
<rect x="390" y="450" width="120" height="60" class="k8s" rx="5"/>
<text x="450" y="475" text-anchor="middle" class="text" fill="white">Prometheus</text>
<text x="450" y="495" text-anchor="middle" class="text" fill="white">Metrics DB</text>
<!-- Grafana -->
<rect x="560" y="450" width="120" height="60" class="portal" rx="5"/>
<text x="620" y="475" text-anchor="middle" class="text" fill="white">Grafana</text>
<text x="620" y="495" text-anchor="middle" class="text" fill="white">Dashboards</text>
<!-- Portal -->
<rect x="730" y="450" width="120" height="60" class="portal" rx="5"/>
<text x="790" y="475" text-anchor="middle" class="text" fill="white">Portal</text>
<text x="790" y="495" text-anchor="middle" class="text" fill="white">Embed</text>
<!-- Monitoring Flow Arrows -->
<line x1="170" y1="480" x2="220" y2="480" class="arrow"/>
<text x="195" y="475" class="flow-label">1. Metrics</text>
<line x1="340" y1="480" x2="390" y2="480" class="arrow"/>
<text x="365" y="475" class="flow-label">2. Scrape</text>
<line x1="510" y1="480" x2="560" y2="480" class="arrow"/>
<text x="535" y="475" class="flow-label">3. Query</text>
<line x1="680" y1="480" x2="730" y2="480" class="arrow"/>
<text x="705" y="475" class="flow-label">4. Display</text>
<!-- Log Flow Section -->
<text x="1050" y="400" text-anchor="middle" class="text title">4. Log Aggregation Flow</text>
<!-- Services -->
<rect x="900" y="450" width="120" height="60" class="k8s" rx="5"/>
<text x="960" y="475" text-anchor="middle" class="text" fill="white">Services</text>
<text x="960" y="495" text-anchor="middle" class="text" fill="white">(K8s/VMs)</text>
<!-- Promtail -->
<rect x="1070" y="450" width="120" height="60" class="k8s" rx="5"/>
<text x="1130" y="475" text-anchor="middle" class="text" fill="white">Promtail</text>
<text x="1130" y="495" text-anchor="middle" class="text" fill="white">Collector</text>
<!-- Loki -->
<rect x="1240" y="450" width="120" height="60" class="k8s" rx="5"/>
<text x="1300" y="475" text-anchor="middle" class="text" fill="white">Loki</text>
<text x="1300" y="495" text-anchor="middle" class="text" fill="white">Log Store</text>
<!-- Portal -->
<rect x="1410" y="450" width="120" height="60" class="portal" rx="5"/>
<text x="1470" y="475" text-anchor="middle" class="text" fill="white">Portal</text>
<text x="1470" y="495" text-anchor="middle" class="text" fill="white">Viewer</text>
<!-- Log Flow Arrows -->
<line x1="1020" y1="480" x2="1070" y2="480" class="arrow"/>
<text x="1045" y="475" class="flow-label">1. Logs</text>
<line x1="1190" y1="480" x2="1240" y2="480" class="arrow"/>
<text x="1215" y="475" class="flow-label">2. Ship</text>
<line x1="1360" y1="480" x2="1410" y2="480" class="arrow"/>
<text x="1385" y="475" class="flow-label">3. Query</text>
<!-- GitOps Flow Section -->
<text x="700" y="600" text-anchor="middle" class="text title">5. GitOps Deployment Flow</text>
<!-- Developer -->
<circle cx="200" cy="700" r="40" class="user"/>
<text x="200" y="705" text-anchor="middle" class="text" fill="white">Dev</text>
<!-- Git Repo -->
<rect x="300" y="670" width="120" height="60" class="k8s" rx="5"/>
<text x="360" y="695" text-anchor="middle" class="text" fill="white">Git Repo</text>
<text x="360" y="715" text-anchor="middle" class="text" fill="white">(Manifests)</text>
<!-- ArgoCD -->
<rect x="480" y="670" width="120" height="60" class="k8s" rx="5"/>
<text x="540" y="695" text-anchor="middle" class="text" fill="white">ArgoCD</text>
<text x="540" y="715" text-anchor="middle" class="text" fill="white">Controller</text>
<!-- Kubernetes -->
<rect x="660" y="670" width="120" height="60" class="k8s" rx="5"/>
<text x="720" y="695" text-anchor="middle" class="text" fill="white">Kubernetes</text>
<text x="720" y="715" text-anchor="middle" class="text" fill="white">API Server</text>
<!-- Crossplane -->
<rect x="840" y="670" width="120" height="60" class="k8s" rx="5"/>
<text x="900" y="695" text-anchor="middle" class="text" fill="white">Crossplane</text>
<text x="900" y="715" text-anchor="middle" class="text" fill="white">Providers</text>
<!-- Infrastructure -->
<rect x="1020" y="670" width="120" height="60" class="proxmox" rx="5"/>
<text x="1080" y="695" text-anchor="middle" class="text" fill="white">Infra</text>
<text x="1080" y="715" text-anchor="middle" class="text" fill="white">(Proxmox)</text>
<!-- GitOps Flow Arrows -->
<line x1="240" y1="700" x2="300" y2="700" class="arrow"/>
<text x="270" y="695" class="flow-label">1. Commit</text>
<line x1="420" y1="700" x2="480" y2="700" class="arrow"/>
<text x="450" y="695" class="flow-label">2. Poll</text>
<line x1="600" y1="700" x2="660" y2="700" class="arrow"/>
<text x="630" y="695" class="flow-label">3. Apply</text>
<line x1="780" y1="700" x2="840" y2="700" class="arrow"/>
<text x="810" y="695" class="flow-label">4. Reconcile</text>
<line x1="960" y1="700" x2="1020" y2="700" class="arrow"/>
<text x="990" y="695" class="flow-label">5. Provision</text>
<!-- Status Feedback -->
<line x1="1020" y1="730" x2="960" y2="730" class="arrow" stroke-dasharray="3,3"/>
<text x="990" y="735" class="flow-label">6. Status</text>
<line x1="840" y1="730" x2="780" y2="730" class="arrow" stroke-dasharray="3,3"/>
<line x1="660" y1="730" x2="600" y2="730" class="arrow" stroke-dasharray="3,3"/>
<line x1="480" y1="730" x2="420" y2="730" class="arrow" stroke-dasharray="3,3"/>
</svg>

After

Width:  |  Height:  |  Size: 11 KiB

View File

@@ -0,0 +1,638 @@
# Phoenix Sankofa Cloud: Data Model & GraphQL Schema
## Overview
The data model for **Phoenix Sankofa Cloud** is designed as a **graph-oriented structure** that represents:
* Infrastructure resources (regions, clusters, nodes, services)
* Relationships between resources (networks, dependencies, policies)
* Metrics and telemetry (performance, health, cost)
* Well-Architected Framework assessments
* Identity and access management
* Cultural intelligence and regional context
---
## Core GraphQL Schema
### Resource Types
```graphql
# Base resource interface
interface Resource {
id: ID!
name: String!
type: ResourceType!
region: Region
createdAt: DateTime!
updatedAt: DateTime!
metadata: JSON
tags: [Tag!]!
}
enum ResourceType {
REGION
SITE
CLUSTER
NODE
VM
POD
SERVICE
NETWORK
STORAGE
TUNNEL
POLICY
}
# Region - Top-level geographic location
type Region implements Resource {
id: ID!
name: String!
  type: ResourceType!
  region: Region # always null — regions are top-level (field required by the Resource interface)
code: String! # e.g., "us-east-1"
country: String
coordinates: Coordinates
sites: [Site!]!
clusters: [Cluster!]!
networks: [Network!]!
createdAt: DateTime!
updatedAt: DateTime!
metadata: JSON
tags: [Tag!]!
culturalContext: CulturalContext
}
# Site - Physical or logical location within a region
type Site implements Resource {
id: ID!
name: String!
type: ResourceType!
region: Region!
clusters: [Cluster!]!
networks: [Network!]!
createdAt: DateTime!
updatedAt: DateTime!
metadata: JSON
tags: [Tag!]!
}
# Cluster - Compute cluster (Kubernetes, Proxmox, etc.)
type Cluster implements Resource {
id: ID!
name: String!
type: ResourceType!
region: Region
site: Site
nodes: [Node!]!
services: [Service!]!
provider: ProviderType
createdAt: DateTime!
updatedAt: DateTime!
metadata: JSON
tags: [Tag!]!
health: HealthStatus!
metrics: ClusterMetrics
}
# Node - Individual compute node
type Node implements Resource {
id: ID!
name: String!
  type: ResourceType!
  region: Region # resolved via the owning cluster (field required by the Resource interface)
cluster: Cluster!
vms: [VM!]!
pods: [Pod!]!
provider: ProviderType
specs: NodeSpecs
createdAt: DateTime!
updatedAt: DateTime!
metadata: JSON
tags: [Tag!]!
health: HealthStatus!
metrics: NodeMetrics
}
# Service - Application service
type Service implements Resource {
id: ID!
name: String!
  type: ResourceType!
  region: Region # resolved via the owning cluster (field required by the Resource interface)
cluster: Cluster
pods: [Pod!]!
dependencies: [Service!]! # Services this depends on
dependents: [Service!]! # Services that depend on this
networks: [Network!]!
createdAt: DateTime!
updatedAt: DateTime!
metadata: JSON
tags: [Tag!]!
health: HealthStatus!
metrics: ServiceMetrics
}
# Network - Network resource (VPC, subnet, tunnel, etc.)
type Network implements Resource {
id: ID!
name: String!
type: ResourceType!
region: Region
cidr: String
connections: [NetworkConnection!]!
services: [Service!]!
createdAt: DateTime!
updatedAt: DateTime!
metadata: JSON
tags: [Tag!]!
}
# Network Connection - Edge between networks
type NetworkConnection {
id: ID!
from: Network!
to: Network!
type: ConnectionType!
latency: Float
bandwidth: Float
status: ConnectionStatus!
createdAt: DateTime!
updatedAt: DateTime!
}
enum ConnectionType {
PEERING
VPN
TUNNEL
DIRECT
}
enum ConnectionStatus {
ACTIVE
INACTIVE
DEGRADED
FAILED
}
# Storage - Storage resource
type Storage implements Resource {
id: ID!
name: String!
type: ResourceType!
region: Region
cluster: Cluster
size: Float!
used: Float!
createdAt: DateTime!
updatedAt: DateTime!
metadata: JSON
tags: [Tag!]!
metrics: StorageMetrics
}
```
### Well-Architected Framework
```graphql
# Well-Architected Framework Pillar
type Pillar {
id: ID!
name: String!
code: PillarCode!
description: String
controls: [Control!]!
resources: [Resource!]!
}
enum PillarCode {
SECURITY
RELIABILITY
COST_OPTIMIZATION
PERFORMANCE_EFFICIENCY
OPERATIONAL_EXCELLENCE
SUSTAINABILITY
}
# Control - Specific control within a pillar
type Control {
id: ID!
name: String!
code: String!
pillar: Pillar!
description: String
findings: [Finding!]!
resources: [Resource!]!
}
# Finding - Assessment finding for a control
type Finding {
id: ID!
control: Control!
resource: Resource!
status: FindingStatus!
severity: Severity!
title: String!
description: String
recommendation: String
createdAt: DateTime!
updatedAt: DateTime!
}
enum FindingStatus {
PASS
FAIL
WARNING
INFO
NOT_APPLICABLE
}
enum Severity {
CRITICAL
HIGH
MEDIUM
LOW
INFO
}
# Risk - Risk associated with a resource
type Risk {
id: ID!
resource: Resource!
pillar: Pillar
severity: Severity!
title: String!
description: String
mitigation: String
createdAt: DateTime!
updatedAt: DateTime!
}
```
### Metrics & Telemetry
```graphql
# Metrics - Time-series metrics
type Metrics {
resource: Resource!
metricType: MetricType!
values: [MetricValue!]!
timeRange: TimeRange!
}
enum MetricType {
CPU_USAGE
MEMORY_USAGE
NETWORK_THROUGHPUT
NETWORK_LATENCY
STORAGE_IOPS
REQUEST_RATE
ERROR_RATE
COST
HEALTH_SCORE
}
type MetricValue {
timestamp: DateTime!
value: Float!
labels: JSON
}
type TimeRange {
start: DateTime!
end: DateTime!
}
# Health Status
enum HealthStatus {
HEALTHY
DEGRADED
UNHEALTHY
UNKNOWN
}
# Cluster Metrics
type ClusterMetrics {
cpuUsage: Float!
memoryUsage: Float!
nodeCount: Int!
healthyNodes: Int!
unhealthyNodes: Int!
totalCost: Float!
healthScore: Float!
}
# Node Metrics
type NodeMetrics {
cpuUsage: Float!
memoryUsage: Float!
diskUsage: Float!
networkThroughput: Float!
healthScore: Float!
}
# Service Metrics
type ServiceMetrics {
requestRate: Float!
errorRate: Float!
latency: Float!
availability: Float!
healthScore: Float!
}
# Storage Metrics
type StorageMetrics {
used: Float!
available: Float!
iops: Float!
throughput: Float!
}
```
### Cultural Intelligence
```graphql
# Cultural Context - Cultural information for a region
type CulturalContext {
region: Region!
language: String
timezone: String
culturalNorms: JSON
complianceRequirements: [ComplianceRequirement!]!
dataResidency: DataResidency
}
# Data Residency
type DataResidency {
region: Region!
requirements: [String!]!
compliance: [ComplianceRequirement!]!
}
```
### Identity & Access
```graphql
# Identity - User or service identity
type Identity {
id: ID!
type: IdentityType!
name: String!
email: String
roles: [Role!]!
permissions: [Permission!]!
createdAt: DateTime!
updatedAt: DateTime!
}
enum IdentityType {
USER
SERVICE
SYSTEM
}
# Role - Access role
type Role {
id: ID!
name: String!
permissions: [Permission!]!
resources: [Resource!]!
}
# Permission - Access permission
type Permission {
id: ID!
action: Action!
resource: Resource
conditions: JSON
}
enum Action {
READ
WRITE
DELETE
ADMIN
}
```
### Queries
```graphql
type Query {
# Resource queries
resource(id: ID!): Resource
resources(filter: ResourceFilter): [Resource!]!
region(id: ID!): Region
regions: [Region!]!
cluster(id: ID!): Cluster
clusters(filter: ClusterFilter): [Cluster!]!
# Network queries
network(id: ID!): Network
networks(filter: NetworkFilter): [Network!]!
networkTopology(regionId: ID): NetworkTopology!
# Service queries
service(id: ID!): Service
services(filter: ServiceFilter): [Service!]!
serviceDependencies(serviceId: ID!): [Service!]!
# Well-Architected Framework
pillar(code: PillarCode!): Pillar
pillars: [Pillar!]!
findings(filter: FindingFilter): [Finding!]!
risks(resourceId: ID): [Risk!]!
# Metrics
  metrics(resourceId: ID!, metricType: MetricType!, timeRange: TimeRangeInput!): Metrics!
  # Health
  health(resourceId: ID!): HealthStatus!
  # Cultural context
  culturalContext(regionId: ID!): CulturalContext
}

# Field arguments must use input types; the object type TimeRange cannot be
# used as an argument, so a parallel input type is defined here.
input TimeRangeInput {
  start: DateTime!
  end: DateTime!
}
```
### Mutations
```graphql
type Mutation {
# Resource mutations
createResource(input: CreateResourceInput!): Resource!
updateResource(id: ID!, input: UpdateResourceInput!): Resource!
deleteResource(id: ID!): Boolean!
# Finding mutations
createFinding(input: CreateFindingInput!): Finding!
updateFinding(id: ID!, input: UpdateFindingInput!): Finding!
# Risk mutations
createRisk(input: CreateRiskInput!): Risk!
updateRisk(id: ID!, input: UpdateRiskInput!): Risk!
}
```
### Subscriptions
```graphql
type Subscription {
# Real-time updates
resourceUpdated(resourceId: ID!): Resource!
metricsUpdated(resourceId: ID!, metricType: MetricType!): MetricValue!
healthChanged(resourceId: ID!): HealthStatus!
findingCreated(controlId: ID): Finding!
riskCreated(resourceId: ID): Risk!
}
```
---
## Database Schema (PostgreSQL)
### Core Tables
```sql
-- Regions
CREATE TABLE regions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name VARCHAR(255) NOT NULL,
code VARCHAR(50) UNIQUE NOT NULL,
country VARCHAR(100),
latitude DECIMAL(10, 8),
longitude DECIMAL(11, 8),
created_at TIMESTAMP DEFAULT NOW(),
updated_at TIMESTAMP DEFAULT NOW(),
metadata JSONB
);
-- Resources (polymorphic)
CREATE TABLE resources (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name VARCHAR(255) NOT NULL,
type VARCHAR(50) NOT NULL,
region_id UUID REFERENCES regions(id),
site_id UUID,
cluster_id UUID,
parent_id UUID REFERENCES resources(id),
provider VARCHAR(50),
created_at TIMESTAMP DEFAULT NOW(),
updated_at TIMESTAMP DEFAULT NOW(),
metadata JSONB
);
-- Resource relationships (graph edges)
CREATE TABLE resource_relationships (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
from_resource_id UUID NOT NULL REFERENCES resources(id),
to_resource_id UUID NOT NULL REFERENCES resources(id),
relationship_type VARCHAR(50) NOT NULL,
metadata JSONB,
created_at TIMESTAMP DEFAULT NOW(),
UNIQUE(from_resource_id, to_resource_id, relationship_type)
);
-- Metrics (time-series)
-- A table may have only one PRIMARY KEY; the natural key for time-series
-- rows is the composite (resource_id, metric_type, timestamp), so the
-- surrogate id column is dropped.
CREATE TABLE metrics (
  resource_id UUID NOT NULL REFERENCES resources(id),
  metric_type VARCHAR(50) NOT NULL,
  value DECIMAL(20, 4) NOT NULL,
  timestamp TIMESTAMP NOT NULL,
  labels JSONB,
  PRIMARY KEY (resource_id, metric_type, timestamp)
);
-- Well-Architected Framework
CREATE TABLE pillars (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
code VARCHAR(50) UNIQUE NOT NULL,
name VARCHAR(255) NOT NULL,
description TEXT
);
CREATE TABLE controls (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
pillar_id UUID NOT NULL REFERENCES pillars(id),
code VARCHAR(50) NOT NULL,
name VARCHAR(255) NOT NULL,
description TEXT,
UNIQUE(pillar_id, code)
);
CREATE TABLE findings (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
control_id UUID NOT NULL REFERENCES controls(id),
resource_id UUID NOT NULL REFERENCES resources(id),
status VARCHAR(50) NOT NULL,
severity VARCHAR(50) NOT NULL,
title VARCHAR(255) NOT NULL,
description TEXT,
recommendation TEXT,
created_at TIMESTAMP DEFAULT NOW(),
updated_at TIMESTAMP DEFAULT NOW()
);
-- Indexes for performance
CREATE INDEX idx_resources_type ON resources(type);
CREATE INDEX idx_resources_region ON resources(region_id);
CREATE INDEX idx_metrics_resource_time ON metrics(resource_id, timestamp DESC);
CREATE INDEX idx_findings_resource ON findings(resource_id);
CREATE INDEX idx_findings_control ON findings(control_id);
```
---
## Neo4j Graph Schema (Optional)
For complex graph queries, Neo4j can be used:
```cypher
// Node labels
(:Region)
(:Site)
(:Cluster)
(:Node)
(:Service)
(:Network)
(:Storage)
// Relationships
(:Region)-[:CONTAINS]->(:Site)
(:Site)-[:CONTAINS]->(:Cluster)
(:Cluster)-[:CONTAINS]->(:Node)
(:Service)-[:DEPENDS_ON]->(:Service)
(:Network)-[:CONNECTS_TO]->(:Network)
(:Resource)-[:AFFECTS_PILLAR]->(:Pillar)
```
---
## Integration Points
### Control Plane Adapters
The data model integrates with:
1. **Proxmox**: Cluster, VM, storage data
2. **Kubernetes/Crossplane**: Pod, service, network data
3. **Cloudflare**: Tunnel, DNS, network data
4. **Prometheus**: Metrics and telemetry
5. **Custom APIs**: Additional infrastructure sources
### Normalization
All control plane data is normalized into the unified graph model, allowing:
* Single source of truth
* Consistent query interface
* Cross-platform relationships
* Unified visualization
---
## Future Enhancements
1. **AI/ML Integration**: Model predictions and recommendations
2. **Cost Optimization**: Cost tracking and optimization recommendations
3. **Security Posture**: Security assessments and threat intelligence
4. **Compliance**: Compliance tracking and reporting
5. **Cultural Intelligence**: Enhanced cultural context and adaptation

View File

@@ -0,0 +1,244 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1700 1100">
<!--
  Deployment architecture diagram: control-plane Kubernetes cluster (masters,
  workers, shared storage), three Proxmox sites (US-East, EU-West, APAC) with
  their storage tiers, shared network infrastructure, Cloudflare tunnel
  agents, and a legend.
  FIX: viewBox width raised from 1600 to 1700 - Site 1's third PVE node is a
  rect at x=1450 with width=200 (right edge x=1650), which was clipped by the
  original 1600-wide canvas. The main title x is recentered to 850 to match.
-->
<defs>
<style>
.server { fill: #34495E; stroke: #2C3E50; stroke-width: 2; }
.k8s-node { fill: #326CE5; stroke: #1E4A8A; stroke-width: 2; }
.proxmox-node { fill: #E57000; stroke: #B85900; stroke-width: 2; }
.storage { fill: #95A5A6; stroke: #7F8C8D; stroke-width: 2; }
.network { fill: #3498DB; stroke: #2980B9; stroke-width: 1; }
.text { font-family: Arial, sans-serif; font-size: 11px; fill: #333; }
.title { font-size: 14px; font-weight: bold; }
.label { font-size: 9px; fill: #666; }
</style>
</defs>
<text x="850" y="30" text-anchor="middle" class="text title">Deployment Architecture - Infrastructure Layout</text>
<!-- Control Plane Cluster -->
<text x="400" y="70" text-anchor="middle" class="text title">Control Plane Cluster (Primary Site)</text>
<!-- K8s Master Nodes -->
<rect x="50" y="100" width="200" height="120" class="k8s-node" rx="5"/>
<text x="150" y="125" text-anchor="middle" class="text" fill="white">K8s Master 1</text>
<text x="150" y="145" text-anchor="middle" class="text" fill="white">etcd, API Server</text>
<text x="150" y="165" text-anchor="middle" class="text" fill="white">Scheduler, Controller</text>
<text x="150" y="185" text-anchor="middle" class="label" fill="white">CPU: 8 cores</text>
<text x="150" y="200" text-anchor="middle" class="label" fill="white">RAM: 32GB</text>
<text x="150" y="215" text-anchor="middle" class="label" fill="white">Disk: 500GB SSD</text>
<rect x="300" y="100" width="200" height="120" class="k8s-node" rx="5"/>
<text x="400" y="125" text-anchor="middle" class="text" fill="white">K8s Master 2</text>
<text x="400" y="145" text-anchor="middle" class="text" fill="white">etcd, API Server</text>
<text x="400" y="165" text-anchor="middle" class="text" fill="white">Scheduler, Controller</text>
<text x="400" y="185" text-anchor="middle" class="label" fill="white">CPU: 8 cores</text>
<text x="400" y="200" text-anchor="middle" class="label" fill="white">RAM: 32GB</text>
<text x="400" y="215" text-anchor="middle" class="label" fill="white">Disk: 500GB SSD</text>
<rect x="550" y="100" width="200" height="120" class="k8s-node" rx="5"/>
<text x="650" y="125" text-anchor="middle" class="text" fill="white">K8s Master 3</text>
<text x="650" y="145" text-anchor="middle" class="text" fill="white">etcd, API Server</text>
<text x="650" y="165" text-anchor="middle" class="text" fill="white">Scheduler, Controller</text>
<text x="650" y="185" text-anchor="middle" class="label" fill="white">CPU: 8 cores</text>
<text x="650" y="200" text-anchor="middle" class="label" fill="white">RAM: 32GB</text>
<text x="650" y="215" text-anchor="middle" class="label" fill="white">Disk: 500GB SSD</text>
<!-- K8s Worker Nodes -->
<rect x="100" y="260" width="180" height="100" class="k8s-node" rx="5"/>
<text x="190" y="285" text-anchor="middle" class="text" fill="white">Worker 1</text>
<text x="190" y="305" text-anchor="middle" class="text" fill="white">Rancher, Crossplane</text>
<text x="190" y="325" text-anchor="middle" class="text" fill="white">ArgoCD, Vault</text>
<text x="190" y="345" text-anchor="middle" class="label" fill="white">CPU: 16 cores, RAM: 64GB</text>
<rect x="320" y="260" width="180" height="100" class="k8s-node" rx="5"/>
<text x="410" y="285" text-anchor="middle" class="text" fill="white">Worker 2</text>
<text x="410" y="305" text-anchor="middle" class="text" fill="white">Portal (Next.js)</text>
<text x="410" y="325" text-anchor="middle" class="text" fill="white">Keycloak</text>
<text x="410" y="345" text-anchor="middle" class="label" fill="white">CPU: 16 cores, RAM: 64GB</text>
<rect x="540" y="260" width="180" height="100" class="k8s-node" rx="5"/>
<text x="630" y="285" text-anchor="middle" class="text" fill="white">Worker 3</text>
<text x="630" y="305" text-anchor="middle" class="text" fill="white">Prometheus, Grafana</text>
<text x="630" y="325" text-anchor="middle" class="text" fill="white">Loki, Tempo</text>
<text x="630" y="345" text-anchor="middle" class="label" fill="white">CPU: 16 cores, RAM: 64GB</text>
<!-- Storage for Control Plane -->
<rect x="750" y="100" width="150" height="260" class="storage" rx="5"/>
<text x="825" y="125" text-anchor="middle" class="text" fill="white">Shared Storage</text>
<text x="825" y="150" text-anchor="middle" class="text" fill="white">(NFS/Ceph)</text>
<text x="825" y="180" text-anchor="middle" class="label" fill="white">• ETCD Backups</text>
<text x="825" y="200" text-anchor="middle" class="label" fill="white">• PVC Storage</text>
<text x="825" y="220" text-anchor="middle" class="label" fill="white">• Log Archives</text>
<text x="825" y="240" text-anchor="middle" class="label" fill="white">• Metrics Data</text>
<text x="825" y="260" text-anchor="middle" class="label" fill="white">• Config Backups</text>
<text x="825" y="280" text-anchor="middle" class="label" fill="white">Capacity: 10TB</text>
<text x="825" y="300" text-anchor="middle" class="label" fill="white">Replication: 3x</text>
<text x="825" y="320" text-anchor="middle" class="label" fill="white">Type: Ceph RBD</text>
<text x="825" y="340" text-anchor="middle" class="label" fill="white">Performance: NVMe</text>
<!-- Proxmox Site 1 -->
<text x="1200" y="70" text-anchor="middle" class="text title">Proxmox Site 1 - US-East</text>
<rect x="950" y="100" width="200" height="120" class="proxmox-node" rx="5"/>
<text x="1050" y="125" text-anchor="middle" class="text" fill="white">PVE Node 1</text>
<text x="1050" y="145" text-anchor="middle" class="text" fill="white">Hypervisor</text>
<text x="1050" y="165" text-anchor="middle" class="label" fill="white">CPU: 32 cores</text>
<text x="1050" y="180" text-anchor="middle" class="label" fill="white">RAM: 256GB</text>
<text x="1050" y="195" text-anchor="middle" class="label" fill="white">VMs: 20</text>
<text x="1050" y="210" text-anchor="middle" class="label" fill="white">Storage: Ceph OSD</text>
<rect x="1200" y="100" width="200" height="120" class="proxmox-node" rx="5"/>
<text x="1300" y="125" text-anchor="middle" class="text" fill="white">PVE Node 2</text>
<text x="1300" y="145" text-anchor="middle" class="text" fill="white">Hypervisor</text>
<text x="1300" y="165" text-anchor="middle" class="label" fill="white">CPU: 32 cores</text>
<text x="1300" y="180" text-anchor="middle" class="label" fill="white">RAM: 256GB</text>
<text x="1300" y="195" text-anchor="middle" class="label" fill="white">VMs: 18</text>
<text x="1300" y="210" text-anchor="middle" class="label" fill="white">Storage: Ceph OSD</text>
<!-- This node's right edge (x=1650) is why the viewBox is 1700 wide. -->
<rect x="1450" y="100" width="200" height="120" class="proxmox-node" rx="5"/>
<text x="1550" y="125" text-anchor="middle" class="text" fill="white">PVE Node 3</text>
<text x="1550" y="145" text-anchor="middle" class="text" fill="white">Hypervisor</text>
<text x="1550" y="165" text-anchor="middle" class="label" fill="white">CPU: 32 cores</text>
<text x="1550" y="180" text-anchor="middle" class="label" fill="white">RAM: 256GB</text>
<text x="1550" y="195" text-anchor="middle" class="label" fill="white">VMs: 15</text>
<text x="1550" y="210" text-anchor="middle" class="label" fill="white">Storage: Ceph OSD</text>
<!-- Ceph Storage Cluster -->
<rect x="1000" y="260" width="500" height="100" class="storage" rx="5"/>
<text x="1250" y="285" text-anchor="middle" class="text" fill="white">Ceph Storage Cluster</text>
<text x="1100" y="310" text-anchor="middle" class="label" fill="white">MON: 3 nodes</text>
<text x="1250" y="310" text-anchor="middle" class="label" fill="white">OSD: 9 nodes</text>
<text x="1400" y="310" text-anchor="middle" class="label" fill="white">MDS: 2 nodes</text>
<text x="1250" y="330" text-anchor="middle" class="label" fill="white">Total: 200TB, Replication: 3x</text>
<!-- Proxmox Site 2 -->
<text x="400" y="420" text-anchor="middle" class="text title">Proxmox Site 2 - EU-West</text>
<rect x="50" y="450" width="200" height="120" class="proxmox-node" rx="5"/>
<text x="150" y="475" text-anchor="middle" class="text" fill="white">PVE Node 1</text>
<text x="150" y="495" text-anchor="middle" class="text" fill="white">Hypervisor</text>
<text x="150" y="515" text-anchor="middle" class="label" fill="white">CPU: 24 cores</text>
<text x="150" y="530" text-anchor="middle" class="label" fill="white">RAM: 192GB</text>
<text x="150" y="545" text-anchor="middle" class="label" fill="white">VMs: 15</text>
<text x="150" y="560" text-anchor="middle" class="label" fill="white">Storage: ZFS</text>
<rect x="300" y="450" width="200" height="120" class="proxmox-node" rx="5"/>
<text x="400" y="475" text-anchor="middle" class="text" fill="white">PVE Node 2</text>
<text x="400" y="495" text-anchor="middle" class="text" fill="white">Hypervisor</text>
<text x="400" y="515" text-anchor="middle" class="label" fill="white">CPU: 24 cores</text>
<text x="400" y="530" text-anchor="middle" class="label" fill="white">RAM: 192GB</text>
<text x="400" y="545" text-anchor="middle" class="label" fill="white">VMs: 12</text>
<text x="400" y="560" text-anchor="middle" class="label" fill="white">Storage: ZFS</text>
<rect x="550" y="450" width="200" height="120" class="proxmox-node" rx="5"/>
<text x="650" y="475" text-anchor="middle" class="text" fill="white">PVE Node 3</text>
<text x="650" y="495" text-anchor="middle" class="text" fill="white">Hypervisor</text>
<text x="650" y="515" text-anchor="middle" class="label" fill="white">CPU: 24 cores</text>
<text x="650" y="530" text-anchor="middle" class="label" fill="white">RAM: 192GB</text>
<text x="650" y="545" text-anchor="middle" class="label" fill="white">VMs: 10</text>
<text x="650" y="560" text-anchor="middle" class="label" fill="white">Storage: ZFS</text>
<!-- ZFS Storage -->
<rect x="100" y="610" width="500" height="80" class="storage" rx="5"/>
<text x="350" y="635" text-anchor="middle" class="text" fill="white">ZFS Storage Pools</text>
<text x="250" y="660" text-anchor="middle" class="label" fill="white">Pool 1: 50TB (RAID-Z2)</text>
<text x="450" y="660" text-anchor="middle" class="label" fill="white">Pool 2: 30TB (RAID-Z1)</text>
<text x="350" y="680" text-anchor="middle" class="label" fill="white">Replication: Async to Site 1</text>
<!-- Proxmox Site 3 -->
<text x="1200" y="420" text-anchor="middle" class="text title">Proxmox Site 3 - APAC</text>
<rect x="950" y="450" width="200" height="120" class="proxmox-node" rx="5"/>
<text x="1050" y="475" text-anchor="middle" class="text" fill="white">PVE Node 1</text>
<text x="1050" y="495" text-anchor="middle" class="text" fill="white">Hypervisor</text>
<text x="1050" y="515" text-anchor="middle" class="label" fill="white">CPU: 16 cores</text>
<text x="1050" y="530" text-anchor="middle" class="label" fill="white">RAM: 128GB</text>
<text x="1050" y="545" text-anchor="middle" class="label" fill="white">VMs: 10</text>
<text x="1050" y="560" text-anchor="middle" class="label" fill="white">Storage: Local</text>
<rect x="1200" y="450" width="200" height="120" class="proxmox-node" rx="5"/>
<text x="1300" y="475" text-anchor="middle" class="text" fill="white">PVE Node 2</text>
<text x="1300" y="495" text-anchor="middle" class="text" fill="white">Hypervisor</text>
<text x="1300" y="515" text-anchor="middle" class="label" fill="white">CPU: 16 cores</text>
<text x="1300" y="530" text-anchor="middle" class="label" fill="white">RAM: 128GB</text>
<text x="1300" y="545" text-anchor="middle" class="label" fill="white">VMs: 8</text>
<text x="1300" y="560" text-anchor="middle" class="label" fill="white">Storage: Local</text>
<!-- Local Storage -->
<rect x="1000" y="610" width="400" height="80" class="storage" rx="5"/>
<text x="1200" y="635" text-anchor="middle" class="text" fill="white">Local Storage</text>
<text x="1100" y="660" text-anchor="middle" class="label" fill="white">Node 1: 20TB SSD</text>
<text x="1300" y="660" text-anchor="middle" class="label" fill="white">Node 2: 20TB SSD</text>
<text x="1200" y="680" text-anchor="middle" class="label" fill="white">Backup: Daily to Site 1</text>
<!-- Network Infrastructure -->
<rect x="50" y="730" width="1500" height="120" class="network" rx="5"/>
<text x="800" y="755" text-anchor="middle" class="text title">Network Infrastructure</text>
<rect x="100" y="770" width="200" height="60" class="server" rx="3"/>
<text x="200" y="790" text-anchor="middle" class="text" fill="white">Load Balancer</text>
<text x="200" y="810" text-anchor="middle" class="label" fill="white">HAProxy / MetalLB</text>
<text x="200" y="825" text-anchor="middle" class="label" fill="white">VIP: 10.0.0.10</text>
<rect x="350" y="770" width="200" height="60" class="server" rx="3"/>
<text x="450" y="790" text-anchor="middle" class="text" fill="white">Gateway Router</text>
<text x="450" y="810" text-anchor="middle" class="label" fill="white">BGP / OSPF</text>
<text x="450" y="825" text-anchor="middle" class="label" fill="white">10.0.0.1</text>
<rect x="600" y="770" width="200" height="60" class="server" rx="3"/>
<text x="700" y="790" text-anchor="middle" class="text" fill="white">DNS Server</text>
<text x="700" y="810" text-anchor="middle" class="label" fill="white">CoreDNS / BIND</text>
<text x="700" y="825" text-anchor="middle" class="label" fill="white">10.0.0.53</text>
<rect x="850" y="770" width="200" height="60" class="server" rx="3"/>
<text x="950" y="790" text-anchor="middle" class="text" fill="white">NTP Server</text>
<text x="950" y="810" text-anchor="middle" class="label" fill="white">Chrony / NTPd</text>
<text x="950" y="825" text-anchor="middle" class="label" fill="white">Time Sync</text>
<rect x="1100" y="770" width="200" height="60" class="server" rx="3"/>
<text x="1200" y="790" text-anchor="middle" class="text" fill="white">Monitoring Node</text>
<text x="1200" y="810" text-anchor="middle" class="label" fill="white">Prometheus Exporter</text>
<text x="1200" y="825" text-anchor="middle" class="label" fill="white">10.0.0.100</text>
<rect x="1350" y="770" width="200" height="60" class="server" rx="3"/>
<text x="1450" y="790" text-anchor="middle" class="text" fill="white">Backup Server</text>
<text x="1450" y="810" text-anchor="middle" class="label" fill="white">Proxmox Backup</text>
<text x="1450" y="825" text-anchor="middle" class="label" fill="white">10.0.0.200</text>
<!-- Cloudflare Tunnels -->
<rect x="50" y="890" width="1500" height="80" class="network" rx="5" fill="#F38020" opacity="0.3"/>
<text x="800" y="915" text-anchor="middle" class="text title">Cloudflare Tunnel Agents</text>
<rect x="100" y="930" width="150" height="30" class="server" rx="3"/>
<text x="175" y="948" text-anchor="middle" class="label">Control Plane Tunnel</text>
<rect x="300" y="930" width="150" height="30" class="server" rx="3"/>
<text x="375" y="948" text-anchor="middle" class="label">Site 1 Tunnel</text>
<rect x="500" y="930" width="150" height="30" class="server" rx="3"/>
<text x="575" y="948" text-anchor="middle" class="label">Site 2 Tunnel</text>
<rect x="700" y="930" width="150" height="30" class="server" rx="3"/>
<text x="775" y="948" text-anchor="middle" class="label">Site 3 Tunnel</text>
<rect x="900" y="930" width="150" height="30" class="server" rx="3"/>
<text x="975" y="948" text-anchor="middle" class="label">Portal Tunnel</text>
<rect x="1100" y="930" width="150" height="30" class="server" rx="3"/>
<text x="1175" y="948" text-anchor="middle" class="label">API Tunnel</text>
<rect x="1300" y="930" width="150" height="30" class="server" rx="3"/>
<text x="1375" y="948" text-anchor="middle" class="label">Monitoring Tunnel</text>
<!-- Legend -->
<rect x="50" y="1000" width="300" height="80" class="network" rx="5"/>
<text x="200" y="1020" text-anchor="middle" class="text title">Legend</text>
<rect x="70" y="1030" width="30" height="20" class="k8s-node" rx="2"/>
<text x="110" y="1045" class="label">Kubernetes Node</text>
<rect x="70" y="1055" width="30" height="20" class="proxmox-node" rx="2"/>
<text x="110" y="1070" class="label">Proxmox Node</text>
<rect x="200" y="1030" width="30" height="20" class="storage" rx="2"/>
<text x="240" y="1045" class="label">Storage</text>
<rect x="200" y="1055" width="30" height="20" class="network" rx="2"/>
<text x="240" y="1070" class="label">Network/Service</text>
</svg>

After

Width:  |  Height:  |  Size: 16 KiB

View File

@@ -0,0 +1,162 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1600 1000">
<!--
  Network topology diagram: Internet (top ellipse) -> Cloudflare Global
  Network band -> the control-plane site, plus three Proxmox sites at the
  bottom. Each site has a "cloudflared Agent" box from which a dashed
  orange .tunnel path is drawn up to the Cloudflare band; LAN CIDR strips
  sit under each site.
  NOTE(review): hostnames (pve1.example.com, ...) and CIDRs (10.x.0.0/16)
  appear to be placeholders - confirm before reusing in real configuration.
-->
<defs>
<style>
.internet { fill: #3498DB; stroke: #2980B9; stroke-width: 2; }
.cloudflare { fill: #F38020; stroke: #C85F00; stroke-width: 2; }
.control-plane { fill: #326CE5; stroke: #1E4A8A; stroke-width: 2; }
.proxmox { fill: #E57000; stroke: #B85900; stroke-width: 2; }
.tunnel { stroke: #F38020; stroke-width: 3; stroke-dasharray: 10,5; fill: none; }
.network { stroke: #7F8C8D; stroke-width: 1; fill: #ECF0F1; }
.text { font-family: Arial, sans-serif; font-size: 12px; fill: #333; }
.title { font-size: 16px; font-weight: bold; }
.label { font-size: 10px; fill: #666; }
</style>
</defs>
<text x="800" y="30" text-anchor="middle" class="text title">Network Topology - Global Hybrid Cloud</text>
<!-- Internet Cloud -->
<ellipse cx="800" cy="100" rx="200" ry="40" class="internet"/>
<text x="800" y="105" text-anchor="middle" class="text" fill="white">Internet</text>
<!-- Cloudflare Global Network -->
<rect x="200" y="180" width="1200" height="120" class="cloudflare" rx="10"/>
<text x="800" y="210" text-anchor="middle" class="text title" fill="white">Cloudflare Global Network</text>
<text x="400" y="240" text-anchor="middle" class="text" fill="white">Edge Locations (200+ cities)</text>
<text x="800" y="240" text-anchor="middle" class="text" fill="white">Zero Trust Gateway</text>
<text x="1200" y="240" text-anchor="middle" class="text" fill="white">DNS + DDoS Protection</text>
<!-- Control Plane Site -->
<rect x="600" y="350" width="400" height="200" class="control-plane" rx="10"/>
<text x="800" y="380" text-anchor="middle" class="text title" fill="white">Control Plane (Primary Site)</text>
<!-- Control Plane Components (two rows of three boxes) -->
<rect x="630" y="400" width="100" height="60" class="network" rx="5"/>
<text x="680" y="425" text-anchor="middle" class="text">K8s API</text>
<text x="680" y="445" text-anchor="middle" class="text">Server</text>
<rect x="750" y="400" width="100" height="60" class="network" rx="5"/>
<text x="800" y="425" text-anchor="middle" class="text">Rancher</text>
<text x="800" y="445" text-anchor="middle" class="text">UI</text>
<rect x="870" y="400" width="100" height="60" class="network" rx="5"/>
<text x="920" y="425" text-anchor="middle" class="text">Crossplane</text>
<text x="920" y="445" text-anchor="middle" class="text">API</text>
<rect x="630" y="480" width="100" height="60" class="network" rx="5"/>
<text x="680" y="505" text-anchor="middle" class="text">ArgoCD</text>
<text x="680" y="525" text-anchor="middle" class="text">GitOps</text>
<rect x="750" y="480" width="100" height="60" class="network" rx="5"/>
<text x="800" y="505" text-anchor="middle" class="text">Portal</text>
<text x="800" y="525" text-anchor="middle" class="text">(Next.js)</text>
<rect x="870" y="480" width="100" height="60" class="network" rx="5"/>
<text x="920" y="505" text-anchor="middle" class="text">Keycloak</text>
<text x="920" y="525" text-anchor="middle" class="text">Auth</text>
<!-- Cloudflare Tunnel to Control Plane -->
<path d="M 800 350 Q 800 280 800 300" class="tunnel"/>
<text x="820" y="320" class="label">Tunnel 1</text>
<!-- Proxmox Site 1 (US-East) -->
<rect x="100" y="650" width="350" height="300" class="proxmox" rx="10"/>
<text x="275" y="680" text-anchor="middle" class="text title" fill="white">Proxmox Site 1 - US-East</text>
<!-- Site 1 Nodes -->
<rect x="130" y="710" width="120" height="100" class="network" rx="5"/>
<text x="190" y="735" text-anchor="middle" class="text">Node 1</text>
<text x="190" y="755" text-anchor="middle" class="text">pve1.example.com</text>
<text x="190" y="775" text-anchor="middle" class="text">VMs: 20</text>
<text x="190" y="795" text-anchor="middle" class="text">Storage: Ceph</text>
<rect x="280" y="710" width="120" height="100" class="network" rx="5"/>
<text x="340" y="735" text-anchor="middle" class="text">Node 2</text>
<text x="340" y="755" text-anchor="middle" class="text">pve2.example.com</text>
<text x="340" y="775" text-anchor="middle" class="text">VMs: 18</text>
<text x="340" y="795" text-anchor="middle" class="text">Storage: Ceph</text>
<!-- Site 1 Tunnel -->
<rect x="130" y="830" width="120" height="60" class="cloudflare" rx="5"/>
<text x="190" y="855" text-anchor="middle" class="text" fill="white">cloudflared</text>
<text x="190" y="875" text-anchor="middle" class="text" fill="white">Agent</text>
<path d="M 190 830 Q 190 500 400 300" class="tunnel"/>
<text x="250" y="550" class="label">Tunnel 2</text>
<!-- Proxmox Site 2 (EU-West) -->
<rect x="550" y="650" width="350" height="300" class="proxmox" rx="10"/>
<text x="725" y="680" text-anchor="middle" class="text title" fill="white">Proxmox Site 2 - EU-West</text>
<!-- Site 2 Nodes -->
<rect x="580" y="710" width="120" height="100" class="network" rx="5"/>
<text x="640" y="735" text-anchor="middle" class="text">Node 1</text>
<text x="640" y="755" text-anchor="middle" class="text">pve3.example.com</text>
<text x="640" y="775" text-anchor="middle" class="text">VMs: 15</text>
<text x="640" y="795" text-anchor="middle" class="text">Storage: ZFS</text>
<rect x="730" y="710" width="120" height="100" class="network" rx="5"/>
<text x="790" y="735" text-anchor="middle" class="text">Node 2</text>
<text x="790" y="755" text-anchor="middle" class="text">pve4.example.com</text>
<text x="790" y="775" text-anchor="middle" class="text">VMs: 12</text>
<text x="790" y="795" text-anchor="middle" class="text">Storage: ZFS</text>
<!-- Site 2 Tunnel -->
<rect x="580" y="830" width="120" height="60" class="cloudflare" rx="5"/>
<text x="640" y="855" text-anchor="middle" class="text" fill="white">cloudflared</text>
<text x="640" y="875" text-anchor="middle" class="text" fill="white">Agent</text>
<path d="M 640 830 Q 640 500 800 300" class="tunnel"/>
<text x="700" y="550" class="label">Tunnel 3</text>
<!-- Proxmox Site 3 (APAC) -->
<rect x="1000" y="650" width="350" height="300" class="proxmox" rx="10"/>
<text x="1175" y="680" text-anchor="middle" class="text title" fill="white">Proxmox Site 3 - APAC</text>
<!-- Site 3 Nodes -->
<rect x="1030" y="710" width="120" height="100" class="network" rx="5"/>
<text x="1090" y="735" text-anchor="middle" class="text">Node 1</text>
<text x="1090" y="755" text-anchor="middle" class="text">pve5.example.com</text>
<text x="1090" y="775" text-anchor="middle" class="text">VMs: 10</text>
<text x="1090" y="795" text-anchor="middle" class="text">Storage: Local</text>
<rect x="1180" y="710" width="120" height="100" class="network" rx="5"/>
<text x="1240" y="735" text-anchor="middle" class="text">Node 2</text>
<text x="1240" y="755" text-anchor="middle" class="text">pve6.example.com</text>
<text x="1240" y="775" text-anchor="middle" class="text">VMs: 8</text>
<text x="1240" y="795" text-anchor="middle" class="text">Storage: Local</text>
<!-- Site 3 Tunnel -->
<rect x="1030" y="830" width="120" height="60" class="cloudflare" rx="5"/>
<text x="1090" y="855" text-anchor="middle" class="text" fill="white">cloudflared</text>
<text x="1090" y="875" text-anchor="middle" class="text" fill="white">Agent</text>
<path d="M 1090 830 Q 1090 500 1000 300" class="tunnel"/>
<text x="1050" y="550" class="label">Tunnel 4</text>
<!-- Local Networks -->
<rect x="130" y="920" width="270" height="20" class="network" rx="3"/>
<text x="265" y="933" text-anchor="middle" class="label">Site 1 LAN: 10.1.0.0/16</text>
<rect x="580" y="920" width="270" height="20" class="network" rx="3"/>
<text x="715" y="933" text-anchor="middle" class="label">Site 2 LAN: 10.2.0.0/16</text>
<rect x="1030" y="920" width="270" height="20" class="network" rx="3"/>
<text x="1165" y="933" text-anchor="middle" class="label">Site 3 LAN: 10.3.0.0/16</text>
<!-- Control Plane Network -->
<rect x="630" y="570" width="340" height="20" class="network" rx="3"/>
<text x="800" y="583" text-anchor="middle" class="label">Control Plane LAN: 10.0.0.0/16</text>
<!-- Connection Labels -->
<text x="200" y="150" class="label">HTTPS (443)</text>
<text x="1400" y="150" class="label">HTTPS (443)</text>
<!-- Security Notes -->
<rect x="50" y="50" width="200" height="80" class="network" rx="5"/>
<text x="150" y="70" text-anchor="middle" class="text title">Security Features</text>
<text x="150" y="90" text-anchor="middle" class="label">• Zero Trust Access</text>
<text x="150" y="105" text-anchor="middle" class="label">• No Public IPs</text>
<text x="150" y="120" text-anchor="middle" class="label">• Encrypted Tunnels</text>
</svg>

After

Width:  |  Height:  |  Size: 8.6 KiB

View File

@@ -0,0 +1,134 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1200 800">
<!--
  System overview diagram: Cloudflare Zero Trust layer on top, the custom
  portal and the Kubernetes control plane (Rancher, Crossplane, ArgoCD,
  Vault) beneath it, a middle row of supporting services (observability,
  identity, GitOps, Proxmox sites), and the edge Proxmox VE clusters at
  the bottom. Solid .arrow lines link the layers; dashed orange lines from
  the "Tunnels" label (x=400, y=110) represent Cloudflare tunnel
  connections to each edge site.
-->
<defs>
<style>
.component { fill: #4A90E2; stroke: #2E5C8A; stroke-width: 2; }
.cloudflare { fill: #F38020; stroke: #C85F00; }
.proxmox { fill: #E57000; stroke: #B85900; }
.k8s { fill: #326CE5; stroke: #1E4A8A; }
.portal { fill: #00D4AA; stroke: #00A888; }
.text { font-family: Arial, sans-serif; font-size: 12px; fill: #333; }
.title { font-size: 16px; font-weight: bold; }
.arrow { stroke: #666; stroke-width: 2; fill: none; marker-end: url(#arrowhead); }
</style>
<marker id="arrowhead" markerWidth="10" markerHeight="10" refX="9" refY="3" orient="auto">
<polygon points="0 0, 10 3, 0 6" fill="#666" />
</marker>
</defs>
<!-- Title -->
<text x="600" y="30" text-anchor="middle" class="text title">Hybrid Cloud Control Plane - System Overview</text>
<!-- Cloudflare Layer -->
<rect x="50" y="60" width="1100" height="100" class="cloudflare" rx="5"/>
<text x="600" y="85" text-anchor="middle" class="text title" fill="white">Cloudflare Zero Trust Layer</text>
<text x="200" y="110" text-anchor="middle" class="text" fill="white">Access Policies</text>
<text x="400" y="110" text-anchor="middle" class="text" fill="white">Tunnels</text>
<text x="600" y="110" text-anchor="middle" class="text" fill="white">Gateway</text>
<text x="800" y="110" text-anchor="middle" class="text" fill="white">WARP</text>
<text x="1000" y="110" text-anchor="middle" class="text" fill="white">DNS</text>
<!-- Portal Layer -->
<rect x="50" y="200" width="300" height="120" class="portal" rx="5"/>
<text x="200" y="230" text-anchor="middle" class="text title" fill="white">Custom Portal</text>
<text x="200" y="250" text-anchor="middle" class="text" fill="white">Next.js + React</text>
<text x="200" y="270" text-anchor="middle" class="text" fill="white">Keycloak Auth</text>
<text x="200" y="290" text-anchor="middle" class="text" fill="white">RBAC UI</text>
<!-- Control Plane -->
<rect x="400" y="200" width="750" height="120" class="k8s" rx="5"/>
<text x="775" y="230" text-anchor="middle" class="text title" fill="white">Kubernetes Control Plane (RKE2/k3s)</text>
<rect x="420" y="240" width="140" height="60" class="component" rx="3"/>
<text x="490" y="265" text-anchor="middle" class="text" fill="white">Rancher</text>
<rect x="580" y="240" width="140" height="60" class="component" rx="3"/>
<text x="650" y="265" text-anchor="middle" class="text" fill="white">Crossplane</text>
<rect x="740" y="240" width="140" height="60" class="component" rx="3"/>
<text x="810" y="265" text-anchor="middle" class="text" fill="white">ArgoCD</text>
<rect x="900" y="240" width="140" height="60" class="component" rx="3"/>
<text x="970" y="265" text-anchor="middle" class="text" fill="white">Vault</text>
<!-- Observability -->
<rect x="50" y="360" width="300" height="100" class="component" rx="5"/>
<text x="200" y="385" text-anchor="middle" class="text title" fill="white">Observability Stack</text>
<text x="200" y="410" text-anchor="middle" class="text" fill="white">Prometheus</text>
<text x="200" y="430" text-anchor="middle" class="text" fill="white">Grafana</text>
<text x="200" y="450" text-anchor="middle" class="text" fill="white">Loki</text>
<!-- Identity -->
<rect x="400" y="360" width="200" height="100" class="component" rx="5"/>
<text x="500" y="385" text-anchor="middle" class="text title" fill="white">Identity</text>
<text x="500" y="410" text-anchor="middle" class="text" fill="white">Keycloak</text>
<text x="500" y="430" text-anchor="middle" class="text" fill="white">OIDC Provider</text>
<text x="500" y="450" text-anchor="middle" class="text" fill="white">MFA + SSO</text>
<!-- GitOps -->
<rect x="650" y="360" width="200" height="100" class="component" rx="5"/>
<text x="750" y="385" text-anchor="middle" class="text title" fill="white">GitOps</text>
<text x="750" y="410" text-anchor="middle" class="text" fill="white">ArgoCD</text>
<text x="750" y="430" text-anchor="middle" class="text" fill="white">Git Repository</text>
<text x="750" y="450" text-anchor="middle" class="text" fill="white">Kustomize</text>
<!-- Proxmox Sites -->
<rect x="900" y="360" width="250" height="100" class="proxmox" rx="5"/>
<text x="1025" y="385" text-anchor="middle" class="text title" fill="white">Proxmox Sites</text>
<text x="1025" y="410" text-anchor="middle" class="text" fill="white">Site 1 (US-East)</text>
<text x="1025" y="430" text-anchor="middle" class="text" fill="white">Site 2 (EU-West)</text>
<text x="1025" y="450" text-anchor="middle" class="text" fill="white">Site N (Global)</text>
<!-- Edge Infrastructure -->
<rect x="50" y="500" width="1100" height="250" class="proxmox" rx="5"/>
<text x="600" y="530" text-anchor="middle" class="text title" fill="white">Edge Infrastructure - Proxmox VE Clusters</text>
<!-- Site 1 -->
<rect x="100" y="560" width="300" height="160" class="component" rx="5"/>
<text x="250" y="585" text-anchor="middle" class="text title" fill="white">Proxmox Site 1</text>
<rect x="120" y="600" width="120" height="100" class="k8s" rx="3"/>
<text x="180" y="630" text-anchor="middle" class="text" fill="white">Node 1</text>
<text x="180" y="650" text-anchor="middle" class="text" fill="white">VMs</text>
<text x="180" y="670" text-anchor="middle" class="text" fill="white">Ceph</text>
<rect x="260" y="600" width="120" height="100" class="k8s" rx="3"/>
<text x="320" y="630" text-anchor="middle" class="text" fill="white">Node 2</text>
<text x="320" y="650" text-anchor="middle" class="text" fill="white">VMs</text>
<text x="320" y="670" text-anchor="middle" class="text" fill="white">Ceph</text>
<!-- Site 2 -->
<rect x="450" y="560" width="300" height="160" class="component" rx="5"/>
<text x="600" y="585" text-anchor="middle" class="text title" fill="white">Proxmox Site 2</text>
<rect x="470" y="600" width="120" height="100" class="k8s" rx="3"/>
<text x="530" y="630" text-anchor="middle" class="text" fill="white">Node 1</text>
<text x="530" y="650" text-anchor="middle" class="text" fill="white">VMs</text>
<text x="530" y="670" text-anchor="middle" class="text" fill="white">ZFS</text>
<rect x="610" y="600" width="120" height="100" class="k8s" rx="3"/>
<text x="670" y="630" text-anchor="middle" class="text" fill="white">Node 2</text>
<text x="670" y="650" text-anchor="middle" class="text" fill="white">VMs</text>
<text x="670" y="670" text-anchor="middle" class="text" fill="white">ZFS</text>
<!-- Site 3 -->
<rect x="800" y="560" width="300" height="160" class="component" rx="5"/>
<text x="950" y="585" text-anchor="middle" class="text title" fill="white">Proxmox Site 3</text>
<rect x="820" y="600" width="120" height="100" class="k8s" rx="3"/>
<text x="880" y="630" text-anchor="middle" class="text" fill="white">Node 1</text>
<text x="880" y="650" text-anchor="middle" class="text" fill="white">VMs</text>
<text x="880" y="670" text-anchor="middle" class="text" fill="white">Storage</text>
<rect x="960" y="600" width="120" height="100" class="k8s" rx="3"/>
<text x="1020" y="630" text-anchor="middle" class="text" fill="white">Node 2</text>
<text x="1020" y="650" text-anchor="middle" class="text" fill="white">VMs</text>
<text x="1020" y="670" text-anchor="middle" class="text" fill="white">Storage</text>
<!-- Arrows (layer-to-layer connections) -->
<line x1="200" y1="200" x2="200" y2="160" class="arrow"/>
<line x1="400" y1="260" x2="350" y2="260" class="arrow"/>
<line x1="775" y1="200" x2="775" y2="160" class="arrow"/>
<line x1="200" y1="320" x2="200" y2="360" class="arrow"/>
<line x1="500" y1="320" x2="500" y2="360" class="arrow"/>
<line x1="750" y1="320" x2="750" y2="360" class="arrow"/>
<line x1="1025" y1="360" x2="1025" y2="560" class="arrow"/>
<line x1="250" y1="560" x2="250" y2="500" class="arrow"/>
<line x1="600" y1="560" x2="600" y2="500" class="arrow"/>
<line x1="950" y1="560" x2="950" y2="500" class="arrow"/>
<!-- Cloudflare Tunnel Connections -->
<line x1="400" y1="110" x2="250" y2="560" class="arrow" stroke-dasharray="5,5" stroke="#F38020"/>
<line x1="400" y1="110" x2="600" y2="560" class="arrow" stroke-dasharray="5,5" stroke="#F38020"/>
<line x1="400" y1="110" x2="950" y2="560" class="arrow" stroke-dasharray="5,5" stroke="#F38020"/>
</svg>

After

Width:  |  Height:  |  Size: 8.3 KiB

View File

@@ -0,0 +1,342 @@
# Phoenix Sankofa Cloud: Technology Stack
## Overview
**Phoenix Sankofa Cloud** is built on a modern, scalable technology stack designed for:
* **Dashboards** → fast, reactive, drill-down, cross-filtering
* **Drag-n-drop & node graph editing** → workflows, network topologies, app maps
* **3D visualizations** → network and architecture in 3D, interactive camera, labels, layers
* **"Studio quality" visuals** → film/AAA-game-adjacent look, not BI chart boilerplate
* **Future-proof** → typed, testable, driven by existing control plane (Proxmox, Crossplane, Cloudflare, etc.)
---
## Front-End Stack
### Core Framework
**Next.js 14+ (React + TypeScript)**
- App Router for modern routing
- SSR/ISR for performance and SEO
- API routes for light backend glue
- Excellent DX, file-based routing, easy deployment
### Language
**TypeScript**
- Strong typing for complex graph interactions
- Type safety for 3D scenes and data structures
- Better IDE support and developer experience
### Styling & Layout
**TailwindCSS + shadcn/ui**
- Modern, dark-mode-friendly design system
- Dense "studio dashboard" aesthetic
- Full control over styling and theming
- Component-based UI library
**Framer Motion**
- Smooth animations and transitions
- Panel transitions, node selection, drag-n-drop feedback
- Mode transitions (2D <-> 3D)
### State Management
**Zustand / Jotai**
- Local UI state management
- Lightweight and performant
**React Query (TanStack Query)**
- Server state management
- GraphQL client integration
- Caching and synchronization
---
## Dashboards & Analytics
### Charts / Dashboards
**ECharts** (via React wrappers)
- Complex, animated dashboards
- High performance
- Extensive customization options
- Studio-quality visuals
**Alternative: visx / Recharts**
- More React-native feel
- Good for design-system-driven visuals
### Tables & Data Grids
**TanStack Table (React Table)**
- Advanced, virtualized tables
- Custom cell renderers (status pills, spark-lines, tags)
- Excellent performance for large datasets
### Maps / Geo
**Mapbox GL JS** or **deck.gl**
- Plotting regions, edge locations, tunnel endpoints
- Interactive geographic visualizations
---
## Drag-n-Drop and Visual Editors
### Graph/Node-Based Editing
**React Flow**
- Excellent for node/edge editors
- Built-in zoom/pan, minimap
- Custom node/edge renderers
- Perfect for L2 "Well-Architected Framework" diagrams
- Logical views of architecture
### General Drag-n-Drop
**@dnd-kit** (preferred; **react-beautiful-dnd** is no longer actively maintained)
- List/board/kitchen-sink DnD
- Building component palettes
- Dashboard layout editors
### Canvas 2D
**Konva.js / react-konva**
- Highly interactive 2D diagrams
- Annotations and overlays
- High-performance 2D rendering
**Pattern:**
- L2 / logical diagrams → React Flow / Konva
- L3 / detailed visuals → 3D WebGL (see 3D section)
---
## 3D Visualizations
### 3D Engine
**three.js**
- WebGL engine for 3D graphics
- Industry standard for web 3D
**react-three-fiber (R3F)**
- Declarative three.js in React
- Component-based 3D scene management
**@react-three/drei**
- Camera controls, orbits, gizmos
- Text rendering, environment setup
- Helper components for common 3D patterns
**postprocessing** (R3F plugin)
- Bloom effects
- SSAO (Screen-Space Ambient Occlusion)
- Depth of field
- Color grading
- Cinematic visual quality
### Graph & Network Visuals in 3D
**3D Graph Layout**
- **d3-force-3d** or **ngraph.forcelayout3d**
- Position nodes in 3D space
- Feed positions into R3F meshes (spheres, cubes, capsules)
- Connect with lines or tubes
**Topologies**
- Each **node** = service, region, cluster, VPC, etc.
- Each **edge** = peering, VPN, Cloudflare tunnel, dependency
- Layers for:
* Regions (325 global nodes)
* Availability zones / sites
* Services running
* Health & risk overlays (color, size, pulsing)
**Interactions**
- Orbital camera (drei's `<OrbitControls />`)
- Click nodes → open side panel with metrics
- Hover edges → show latency, bandwidth, status
- Use **instanced meshes** for performance with many nodes
---
## Back-End & Data Model
### API Layer
**GraphQL**
- **Option A**: Hasura on Postgres (auto GraphQL)
- **Option B**: Apollo Server / NestJS GraphQL over Postgres/Neo4j
- Perfect fit for graph-like data:
* regions
* clusters
* networks
* services
* dependencies
### Data Store
**Relational + Graph Hybrid**
**Option A (Pragmatic):**
- **PostgreSQL** with:
* Topology tables (resources, relations, metrics)
* JSONB for flexible metadata
* Materialized views for dashboards
**Option B (More "Native"):**
- **Neo4j** or another graph DB for relationships
* Natural for queries like:
* "Show all paths from this service to this edge"
* "Which nodes share a risk factor?"
**Hybrid Approach:**
- Start with Postgres
- Move hot graph queries to Neo4j later
- Best of both worlds
### Real-Time
**Real-Time Pipeline**
- **Metrics**: Prometheus → push to TSDB (Prometheus / Timescale)
- **Events**: Kafka/Redpanda or NATS for events (state changes, incidents)
- **UI**: WebSockets / GraphQL Subscriptions to push updates
**Examples:**
- Node changes color when health crosses threshold
- Edge animates when traffic spikes
- New region lights up when provisioned via Crossplane
---
## Integration with Infrastructure
### Control-Plane Adapters (Backend Services)
Services that:
- Call **Proxmox APIs** (cluster, VM, storage)
- Call **Cloudflare APIs** (tunnels, Zero Trust configs, DNS)
- Query **Crossplane/Kubernetes** (CRDs representing resources)
### Normalize into "Resource Graph"
Everything becomes a node in graph DB or Postgres schema:
- Region
- Site
- Cluster
- Node
- VM/Pod/Service
- Tunnel
- Policy
**UI reads this graph, not raw cloud APIs.**
This enables Azure-Graph-like behavior without Azure.
---
## Tooling, DevOps, and Quality
### Dev Environment
- **Package Manager**: pnpm / yarn
- **Linting**: ESLint
- **Formatting**: Prettier
- **Component Development**: Storybook (for UI components & visual system)
- **Testing**: Vitest / Jest + React Testing Library
### Build & Deploy
**Docker Images** for:
- Front-end (Next.js)
- API (GraphQL servers)
- Graph DB / Postgres
**Deploy on:**
- Kubernetes control cluster (part of architecture)
- Traefik or NGINX Ingress behind Cloudflare
### Observability
- **Logs**: Loki / ELK
- **Metrics**: Prometheus + Grafana
- **Traces**: OpenTelemetry / Tempo
---
## Well-Architected Framework Visualizations
### Pillar Views
- Security
- Reliability
- Cost Optimization
- Performance Efficiency
- Operational Excellence
- Sustainability
### Layered Views
- **Physical** (Proxmox nodes, racks)
- **Network** (subnets, tunnels, regions)
- **Application** (services, workloads, dependencies)
- **Governance** (policies, SLAs, compliance)
### Implementation Pattern
1. **Model pillars & controls in backend**
- GraphQL types: Pillar, Control, Finding, Risk, Recommendation
2. **Bind each resource node** to:
- Which pillar(s) it affects
- Score/health for each pillar
3. **In UI:**
- Use **React Flow** 2D diagrams for pillar-specific overlays
- Use **R3F 3D** to show structural/physical layout, colored by pillar health
- Provide **"Lens" switch** (Performance lens, Security lens, Cost lens) that:
* Changes color mapping & overlays
* Toggles visibility of certain node types
* Animates transitions with Framer Motion / R3F
This delivers **studio-quality, cinematic, but cognitively useful** Well-Architected views.
---
## Recommended Stack Summary (Opinionated)
### Front-End
* Next.js (React, TypeScript)
* TailwindCSS + shadcn/ui
* React Query / Apollo Client
* Framer Motion
* React Flow + @dnd-kit
* react-three-fiber + drei + postprocessing
* ECharts / visx for charts
* TanStack Table for data grids
### Back-End
* GraphQL API (NestJS + Apollo OR Hasura + Postgres)
* Postgres (core data) + optional Neo4j for complex graph queries
* WebSockets / GraphQL Subscriptions for real-time
### Infra
* K8s on existing clusters
* Traefik/NGINX ingress behind Cloudflare
* Prometheus, Loki, Grafana, OpenTelemetry
---
## Next Steps
1. Design the **actual data model** (tables/graph schema)
2. Sketch the **component structure** of the UI (pages, editors, 3D views)
3. Write **starter Next.js project structure** with key libraries wired together

View File

@@ -0,0 +1,450 @@
# Phoenix Sankofa Cloud: Well-Architected Framework Visualization
## Overview
**Phoenix Sankofa Cloud** implements a comprehensive Well-Architected Framework (WAF) visualization system that provides:
* **Studio-quality visuals** with cinematic aesthetics
* **Multi-layered views** of the same architecture
* **Pillar-specific lenses** for focused analysis
* **Interactive 3D and 2D visualizations**
* **Real-time health and risk overlays**
---
## Well-Architected Framework Pillars
### 1. Security
**Focus**: Protecting information, systems, and assets
**Key Areas:**
- Identity and access management
- Threat detection and response
- Data protection
- Infrastructure security
- Compliance and governance
**Visual Indicators:**
- Color: Red/Amber for risks, Green for secure
- Icons: Shield, lock, key symbols
- Overlays: Security zones, access boundaries
### 2. Reliability
**Focus**: Ability to recover from failures and meet demand
**Key Areas:**
- Fault tolerance
- Disaster recovery
- High availability
- Capacity planning
- Change management
**Visual Indicators:**
- Color: Green for healthy, Red for failures
- Icons: Checkmark, warning, error symbols
- Overlays: Availability zones, redundancy paths
### 3. Cost Optimization
**Focus**: Delivering business value at the lowest price point
**Key Areas:**
- Right-sizing resources
- Reserved capacity
- Cost monitoring
- Eliminating waste
- Pricing models
**Visual Indicators:**
- Color: Green for optimized, Red for high cost
- Icons: Dollar, chart, trend symbols
- Overlays: Cost heatmaps, spending trends
### 4. Performance Efficiency
**Focus**: Using computing resources efficiently
**Key Areas:**
- Compute optimization
- Storage optimization
- Network optimization
- Database optimization
- Caching strategies
**Visual Indicators:**
- Color: Green for efficient, Red for bottlenecks
- Icons: Speed, performance, optimization symbols
- Overlays: Performance metrics, bottlenecks
### 5. Operational Excellence
**Focus**: Running and monitoring systems
**Key Areas:**
- Automation
- Change management
- Monitoring and observability
- Incident response
- Documentation
**Visual Indicators:**
- Color: Blue for operational, Amber for issues
- Icons: Gear, monitor, alert symbols
- Overlays: Operational status, automation flows
### 6. Sustainability
**Focus**: Environmental impact and efficiency
**Key Areas:**
- Energy efficiency
- Resource optimization
- Carbon footprint
- Renewable energy
- Waste reduction
**Visual Indicators:**
- Color: Green for sustainable, Red for impact
- Icons: Leaf, energy, environment symbols
- Overlays: Energy consumption, carbon footprint
---
## Visualization Architecture
### Layered Views
The same infrastructure can be viewed through different layers:
#### 1. Physical Layer
- **Shows**: Proxmox nodes, racks, physical infrastructure
- **Use Case**: Physical topology, hardware health
- **Visualization**: 3D physical layout, node positions
#### 2. Network Layer
- **Shows**: Subnets, tunnels, regions, network connections
- **Use Case**: Network topology, connectivity, routing
- **Visualization**: 3D network graph, connection flows
#### 3. Application Layer
- **Shows**: Services, workloads, dependencies
- **Use Case**: Application architecture, service dependencies
- **Visualization**: 2D/3D service graph, dependency trees
#### 4. Governance Layer
- **Shows**: Policies, SLAs, compliance, access controls
- **Use Case**: Governance, compliance, policy enforcement
- **Visualization**: Policy overlays, compliance status
---
## Implementation Pattern
### Backend Model
```graphql
# Pillar definition
type Pillar {
id: ID!
code: PillarCode!
name: String!
controls: [Control!]!
}
# Control definition
type Control {
id: ID!
pillar: Pillar!
code: String!
name: String!
findings: [Finding!]!
}
# Finding for a resource
type Finding {
id: ID!
control: Control!
resource: Resource!
status: FindingStatus!
severity: Severity!
recommendation: String
}
# Risk associated with resource
type Risk {
id: ID!
resource: Resource!
pillar: Pillar
severity: Severity!
title: String!
mitigation: String
}
```
### Resource Binding
Each resource node is bound to:
1. **Pillar Affiliations**: Which pillars affect this resource
2. **Health Scores**: Score (0-100) for each pillar
3. **Findings**: Specific findings for this resource
4. **Risks**: Risks associated with this resource
### UI Implementation
#### 2D Diagrams (React Flow)
**Pillar-Specific Overlays:**
- Security lens: Show security zones, access boundaries
- Reliability lens: Show redundancy, failover paths
- Cost lens: Show cost heatmaps, spending trends
- Performance lens: Show bottlenecks, optimization opportunities
- Operations lens: Show automation, monitoring coverage
- Sustainability lens: Show energy consumption, carbon footprint
**Features:**
- Custom node renderers based on pillar health
- Color coding by pillar score
- Overlay toggles for different aspects
- Interactive drill-down
#### 3D Visualizations (react-three-fiber)
**Structural/Physical Layout:**
- 3D representation of infrastructure
- Nodes colored by pillar health
- Edges showing relationships
- Layers for different resource types
**Pillar Lenses:**
- Switch between pillar views
- Color mapping changes based on selected pillar
- Node size/glow based on health score
- Animated transitions between lenses
**Features:**
- Orbital camera controls
- Click nodes for details
- Hover for quick info
- Layer visibility toggles
- Smooth animations
---
## Lens Switching System
### Lens Types
1. **Security Lens**
- Color: Red/Amber/Green based on security posture
- Overlays: Security zones, access boundaries, threat indicators
- Filters: Show only security-relevant resources
2. **Reliability Lens**
- Color: Green/Amber/Red based on availability
- Overlays: Redundancy paths, failover capabilities
- Filters: Show reliability-critical resources
3. **Cost Lens**
- Color: Green/Amber/Red based on cost efficiency
- Overlays: Cost heatmaps, spending trends
- Filters: Show high-cost resources
4. **Performance Lens**
- Color: Green/Amber/Red based on performance
- Overlays: Bottlenecks, optimization opportunities
- Filters: Show performance-critical resources
5. **Operations Lens**
- Color: Blue/Amber based on operational status
- Overlays: Automation coverage, monitoring
- Filters: Show operational resources
6. **Sustainability Lens**
- Color: Green/Amber/Red based on environmental impact
- Overlays: Energy consumption, carbon footprint
- Filters: Show high-impact resources
### Lens Switching UI
```typescript
// Lens selector component
<LensSelector
currentLens={selectedLens}
onLensChange={handleLensChange}
pillars={pillars}
/>
// Animated transition
<AnimatePresence mode="wait">
<motion.div
key={selectedLens}
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
exit={{ opacity: 0 }}
>
<Visualization lens={selectedLens} />
</motion.div>
</AnimatePresence>
```
---
## Visual Design Principles
### Studio Quality Aesthetic
* **Dark Theme**: Near-black backgrounds (`#0A0A0A`)
* **Neon Accents**: Teal, magenta, cyan, amber for status
* **Glow Effects**: Subtle glow around important nodes
* **Layered Depth**: Multiple layers for visual hierarchy
* **Smooth Animations**: 200-300ms transitions
### Color Coding
**Health Scores:**
- **90-100**: Bright Green (`#00FF88`)
- **70-89**: Amber (`#FFB800`)
- **50-69**: Orange (`#FF8C00`)
- **0-49**: Red (`#FF0040`)
**Pillar-Specific:**
- **Security**: Red spectrum
- **Reliability**: Green spectrum
- **Cost**: Blue spectrum
- **Performance**: Cyan spectrum
- **Operations**: Purple spectrum
- **Sustainability**: Green spectrum
### Typography
* **Headings**: Inter/Satoshi, bold
* **Metrics**: Monospace for numbers
* **Labels**: Inter, regular
* **Consistent Scale**: 12px, 14px, 16px, 18px, 24px, 32px, 48px
---
## Interactive Features
### Node Interactions
1. **Click**: Open side panel with:
- Resource details
- Pillar scores
- Findings and risks
- Recommendations
- Metrics
2. **Hover**: Show tooltip with:
- Resource name
- Health score
- Quick metrics
- Status
3. **Select**: Highlight and show:
- Related resources
- Dependencies
- Affected pillars
### Edge Interactions
1. **Hover**: Show:
- Connection type
- Latency
- Bandwidth
- Status
2. **Click**: Show:
- Connection details
- Metrics
- Health
### View Controls
1. **Zoom/Pan**: Navigate the visualization
2. **Filter**: Show/hide resource types
3. **Search**: Find specific resources
4. **Time Range**: View historical states
5. **Export**: Export visualization as image/PDF
---
## Real-Time Updates
### WebSocket Subscriptions
```graphql
# Note: a GraphQL subscription operation must have exactly one root field,
# so these are defined as three separate subscriptions.
subscription OnHealthChanged($resourceId: ID!) {
  healthChanged(resourceId: $resourceId) {
    resourceId
    healthStatus
    pillarScores
  }
}

subscription OnFindingCreated($controlId: ID!) {
  findingCreated(controlId: $controlId) {
    id
    resource
    status
    severity
  }
}

subscription OnRiskCreated($resourceId: ID!) {
  riskCreated(resourceId: $resourceId) {
    id
    resource
    severity
    title
  }
}
```
### Update Behavior
1. **Node Color**: Updates when health score changes
2. **Node Size**: Updates when metrics change
3. **Edge Animation**: Pulses when traffic spikes
4. **Overlay Refresh**: Updates when findings change
5. **Smooth Transitions**: Animated updates, not jarring
---
## Component Structure
```
src/components/well-architected/
├── LensSelector.tsx # Pillar lens selector
├── PillarView.tsx # Pillar-specific view
├── ResourceNode.tsx # Resource node component
├── FindingOverlay.tsx # Findings overlay
├── RiskIndicator.tsx # Risk indicators
├── HealthScore.tsx # Health score display
├── WAFDashboard.tsx # Main WAF dashboard
├── WAF3DView.tsx # 3D WAF visualization
└── WAF2DView.tsx # 2D WAF visualization
```
---
## Future Enhancements
1. **AI Recommendations**: ML-powered optimization suggestions
2. **Predictive Analytics**: Forecast future issues
3. **Automated Remediation**: Auto-fix common issues
4. **Custom Pillars**: User-defined assessment pillars
5. **Compliance Mapping**: Map to specific compliance frameworks
6. **Cost Forecasting**: Predict future costs
7. **Sustainability Metrics**: Enhanced environmental tracking
---
## Best Practices
1. **Start with Overview**: Show high-level health across all pillars
2. **Drill Down Gradually**: Allow users to explore details
3. **Provide Context**: Always show what pillar/lens is active
4. **Make Actions Clear**: Show what can be done with findings
5. **Prioritize**: Highlight critical issues first
6. **Keep It Visual**: Use visuals over text when possible
7. **Enable Comparison**: Compare resources side-by-side
8. **Track Progress**: Show improvement over time

View File

@@ -0,0 +1,514 @@
# Enterprise Ethereum Alliance (EEA) Blockchain Architecture
## Overview
Phoenix Sankofa Cloud implements a private, permissioned blockchain network based on Enterprise Ethereum Alliance (EEA) standards. This blockchain is designed for enterprise use cases, **not cryptocurrencies**, focusing on supply chain transparency, resource provenance, identity management, compliance, and multi-party agreements.
## Core Principles
### Enterprise-Focused
- **No Cryptocurrency**: This is a utility blockchain, not a payment system
- **Permissioned Network**: Controlled access with known participants
- **Privacy-First**: Private transactions and confidential smart contracts
- **Compliance-Ready**: Built for regulatory and audit requirements
### EEA Standards Compliance
- **Ethereum Compatibility**: Compatible with Ethereum tooling and standards
- **Enterprise Features**: Privacy, permissioning, and scalability
- **Interoperability**: Can integrate with other EEA-compliant networks
- **Standards-Based**: Follows EEA specifications and best practices
## Architecture Components
### 1. Blockchain Network Layer
#### Network Topology
**Consensus Nodes (Validators)**:
- **Location**: Tier 1 core datacenters
- **Count**: 3-5 validators per core datacenter
- **Consensus**: Proof of Authority (PoA) or Proof of Stake (PoS)
- **Role**: Validate transactions, create blocks, maintain network consensus
**Read Replica Nodes**:
- **Location**: Tier 2 regional datacenters
- **Count**: 2-3 replicas per regional datacenter
- **Role**: Serve read queries, reduce latency, provide redundancy
**Light Client Nodes**:
- **Location**: Tier 3 edge sites
- **Role**: Query blockchain state without full node overhead
#### Consensus Mechanism
**Proof of Authority (PoA)** - Recommended for Initial Deployment:
- **Validators**: Known, trusted entities (Phoenix Sankofa Cloud operators)
- **Block Creation**: Rotating validator selection
- **Finality**: Fast block finality (1-5 seconds)
- **Energy Efficiency**: Low energy consumption
- **Governance**: Centralized but auditable
**Proof of Stake (PoS)** - Future Migration Option:
- **Validators**: Stake-based selection
- **Decentralization**: More decentralized than PoA
- **Security**: Economic security through staking
- **Governance**: On-chain governance mechanisms
#### Network Communication
**Peer-to-Peer (P2P) Network**:
- **Protocol**: Ethereum devp2p protocol
- **Encryption**: TLS for all peer connections
- **Discovery**: Private network discovery mechanism
- **Topology**: Mesh network with redundant paths
**Network Overlay**:
- **VPN**: Encrypted VPN overlay for blockchain traffic
- **Segmentation**: Isolated network segment for blockchain
- **Firewall Rules**: Strict firewall rules for blockchain ports
### 2. Smart Contract Layer
#### Smart Contract Categories
**1. Resource Provisioning Contracts**
```solidity
// Pseudo-code structure
contract ResourceProvisioning {
struct Resource {
string resourceId;
string region;
string datacenter;
ResourceType resourceType; // "type" is a reserved keyword in Solidity
uint256 provisionedAt;
address provisionedBy;
bool active;
}
function provisionResource(
string memory resourceId,
string memory region,
ResourceType resourceType
) public returns (bool);
function deprovisionResource(string memory resourceId) public;
function getResource(string memory resourceId) public view returns (Resource memory);
}
```
**Use Cases**:
- Track VM/container provisioning across regions
- Immutable record of resource lifecycle
- Multi-party verification of resource allocation
**2. Supply Chain Provenance Contracts**
```solidity
contract SupplyChainProvenance {
struct Component {
string componentId;
string manufacturer;
string model;
uint256 manufacturedAt;
string[] certifications;
address currentOwner;
ComponentStatus status;
}
function registerComponent(
string memory componentId,
string memory manufacturer,
string memory model
) public;
function transferComponent(
string memory componentId,
address newOwner
) public;
function getComponentHistory(string memory componentId)
public view returns (Component[] memory);
}
```
**Use Cases**:
- Track hardware from manufacturer to deployment
- Verify component authenticity
- Compliance with hardware security requirements
- Audit trail for hardware lifecycle
**3. Identity and Access Management Contracts**
```solidity
contract IdentityManagement {
struct Identity {
string identityId;
address blockchainAddress;
string[] attributes;
uint256 createdAt;
bool verified;
address verifiedBy;
}
function registerIdentity(
string memory identityId,
string[] memory attributes
) public;
function verifyIdentity(
string memory identityId,
address verifier
) public;
function getIdentity(string memory identityId)
public view returns (Identity memory);
}
```
**Use Cases**:
- Sovereign identity management
- Cross-region identity federation
- Decentralized identity verification
- Self-sovereign identity (SSI) support
**4. Billing and Settlement Contracts**
```solidity
contract BillingSettlement {
struct UsageRecord {
string resourceId;
uint256 startTime;
uint256 endTime;
uint256 computeUnits;
uint256 storageUnits;
uint256 networkUnits;
address customer;
}
struct Invoice {
string invoiceId;
address customer;
UsageRecord[] usageRecords;
uint256 totalAmount;
InvoiceStatus status;
}
function recordUsage(UsageRecord memory usage) public;
function generateInvoice(string memory invoiceId, address customer) public;
function settleInvoice(string memory invoiceId) public;
}
```
**Use Cases**:
- Transparent resource usage tracking
- Multi-party billing verification
- Automated settlement
- Dispute resolution
**5. Compliance and Audit Contracts**
```solidity
contract ComplianceAudit {
struct ComplianceRecord {
string recordId;
string complianceType; // GDPR, SOC2, ISO27001, etc.
string region;
uint256 timestamp;
bool compliant;
string evidenceHash;
address verifiedBy;
}
function recordCompliance(
string memory recordId,
string memory complianceType,
string memory region,
bool compliant,
string memory evidenceHash
) public;
function getComplianceHistory(string memory region)
public view returns (ComplianceRecord[] memory);
}
```
**Use Cases**:
- Regulatory compliance tracking
- Audit log immutability
- Multi-party compliance verification
- Automated compliance reporting
**6. Service Level Agreement (SLA) Contracts**
```solidity
contract SLAEnforcement {
struct SLA {
string slaId;
address customer;
address provider;
uint256 uptimeRequirement; // percentage * 100
uint256 responseTimeRequirement; // milliseconds
uint256 penaltyAmount;
bool active;
}
struct SLAViolation {
string slaId;
uint256 violationTime;
string violationType;
uint256 penaltyAmount;
}
function createSLA(
string memory slaId,
address customer,
uint256 uptimeRequirement,
uint256 responseTimeRequirement
) public;
function recordViolation(
string memory slaId,
string memory violationType
) public;
function enforcePenalty(string memory slaId) public;
}
```
**Use Cases**:
- Automated SLA enforcement
- Penalty/reward mechanisms
- Transparent SLA tracking
- Dispute resolution
### 3. Privacy and Confidentiality
#### Private Transactions
- **Private State**: Encrypted state for sensitive data
- **Private Transactions**: Only visible to authorized parties
- **Zero-Knowledge Proofs**: Verify without revealing data
- **Confidential Smart Contracts**: Encrypted contract execution
#### Access Control
- **Permissioning**: Role-based access control (RBAC)
- **Multi-Signature**: Require multiple approvals for critical operations
- **Time-Locked**: Delay execution for security
- **Whitelisting**: Approved addresses only
### 4. Integration Layer
#### API Gateway
- **REST API**: RESTful API for blockchain operations
- **GraphQL API**: GraphQL for flexible queries
- **WebSocket**: Real-time blockchain event streaming
- **Authentication**: OAuth2/JWT for API access
#### Blockchain Adapters
- **Ethereum Client**: Geth, Besu, or Nethermind
- **Web3 Integration**: Web3.js/Ethers.js for client applications
- **Smart Contract Compilation**: Solidity compiler integration
- **Event Monitoring**: Real-time event monitoring and processing
#### Data Synchronization
- **State Sync**: Synchronize blockchain state to traditional databases
- **Event Processing**: Process blockchain events for application logic
- **Indexing**: Index blockchain data for fast queries
- **Caching**: Cache frequently accessed blockchain data
### 5. Storage Architecture
#### Blockchain State Storage
- **State Database**: LevelDB or RocksDB for current state
- **Block Storage**: Distributed block storage
- **Archive Storage**: Long-term archival for compliance
- **Backup**: Regular backups of blockchain state
#### Off-Chain Storage
- **IPFS**: InterPlanetary File System for large files
- **Object Storage**: S3-compatible storage for documents
- **Database**: Traditional databases for query optimization
- **CDN**: Content delivery for public data
### 6. Security Architecture
#### Validator Security
- **Hardware Security Modules (HSMs)**: Secure key storage
- **Key Management**: Secure key generation and rotation
- **Multi-Signature**: Require multiple validators for critical operations
- **Validator Monitoring**: Real-time monitoring of validator health
#### Network Security
- **Encryption**: End-to-end encryption for all communications
- **Firewall**: Strict firewall rules for blockchain ports
- **DDoS Protection**: DDoS mitigation for blockchain network
- **Intrusion Detection**: Monitor for suspicious activity
#### Smart Contract Security
- **Code Audits**: Regular security audits of smart contracts
- **Formal Verification**: Mathematical verification of contract logic
- **Upgrade Mechanisms**: Secure upgrade paths for contracts
- **Emergency Pause**: Ability to pause contracts in emergencies
## Use Case Examples
### Use Case 1: Hardware Supply Chain Tracking
**Scenario**: Track a server from manufacturer to deployment
1. **Manufacturer Registration**:
- Manufacturer registers component on blockchain
- Component receives unique ID and metadata
- Certifications and compliance documents stored
2. **Distribution Tracking**:
- Each transfer recorded on blockchain
- Ownership changes tracked immutably
- Location and condition updates recorded
3. **Deployment Verification**:
- Component deployed in datacenter
- Deployment recorded on blockchain
- Compliance and security checks verified
4. **Audit and Compliance**:
- Complete history available on blockchain
- Immutable audit trail for compliance
- Multi-party verification of authenticity
### Use Case 2: Cross-Region Resource Allocation
**Scenario**: Allocate resources across multiple regions with transparency
1. **Resource Request**:
- Customer requests resources via smart contract
- Request includes region, type, and requirements
- Request recorded on blockchain
2. **Allocation Process**:
- System allocates resources based on availability
- Allocation recorded on blockchain
- Multi-party verification of allocation
3. **Usage Tracking**:
- Resource usage tracked and recorded
- Usage data stored on blockchain
- Transparent billing based on usage
4. **Settlement**:
- Automated settlement via smart contract
- Multi-party verification of billing
- Immutable record of transactions
### Use Case 3: Sovereign Identity Federation
**Scenario**: Enable identity federation across regions
1. **Identity Registration**:
- User registers identity on blockchain
- Identity attributes stored (encrypted)
- Identity verified by trusted authority
2. **Cross-Region Authentication**:
- User authenticates in one region
- Identity verified via blockchain
- Access granted in other regions
3. **Attribute Sharing**:
- Selective attribute sharing via smart contracts
- Privacy-preserving identity verification
- Consent management for attribute sharing
## Deployment Architecture
### Phase 1: Foundation (Months 1-6)
- Deploy 3 validator nodes in core datacenters
- Deploy initial smart contracts
- Set up network infrastructure
- Basic integration with control plane
### Phase 2: Expansion (Months 7-18)
- Expand to 6-8 validator nodes
- Deploy read replicas in regional datacenters
- Expand smart contract library
- Full integration with all services
### Phase 3: Scale (Months 19-36)
- Complete validator network (10-15 validators)
- Read replicas in all regional datacenters
- Light clients in edge sites
- Full blockchain network deployment
## Monitoring and Operations
### Blockchain Metrics
- **Block Production**: Block time, block size, transaction count
- **Network Health**: Peer count, network latency, sync status
- **Validator Performance**: Uptime, block production rate, consensus participation
- **Smart Contract Metrics**: Execution time, gas usage, error rates
### Alerting
- **Validator Down**: Alert when validator goes offline
- **Network Issues**: Alert on network connectivity problems
- **Smart Contract Errors**: Alert on contract execution failures
- **Security Events**: Alert on suspicious activity
### Maintenance
- **Regular Updates**: Smart contract upgrades, network upgrades
- **Key Rotation**: Regular rotation of validator keys
- **Backup and Recovery**: Regular backups and disaster recovery testing
- **Performance Optimization**: Continuous optimization of network performance
## Compliance and Governance
### Regulatory Compliance
- **Data Privacy**: GDPR, CCPA compliance for identity data
- **Financial**: SOX compliance for billing/accounting
- **Industry**: HIPAA, PCI-DSS where applicable
- **Regional**: Compliance with regional regulations
### Governance Model
- **Governance Board**: Multi-party governance board
- **Decision Making**: Consensus-based decision making
- **Upgrade Process**: Formal proposal and voting process
- **Dispute Resolution**: On-chain and off-chain mechanisms
## Technology Stack
### Blockchain Platform Options
**Option 1: Hyperledger Besu (Recommended)**
- Enterprise Ethereum client
- EEA-compliant
- Privacy features (Tessera; its predecessor Orion is deprecated)
- Permissioning support
- Active development and support
**Option 2: Quorum (J.P. Morgan)**
- Enterprise Ethereum fork
- Privacy features
- Permissioning support
- Mature and stable
**Option 3: Polygon Edge**
- Ethereum-compatible
- High performance
- Modular architecture
- Good for scaling
### Smart Contract Development
- **Language**: Solidity
- **Framework**: Hardhat or Truffle
- **Testing**: Mocha/Chai, Foundry
- **Security**: Slither, Mythril, formal verification
### Integration Tools
- **Web3 Libraries**: Web3.js, Ethers.js
- **API Gateway**: Custom REST/GraphQL API
- **Event Processing**: Apache Kafka, NATS
- **Monitoring**: Prometheus, Grafana
## Next Steps
1. **Platform Selection**: Choose blockchain platform (recommend Hyperledger Besu)
2. **Network Design**: Design network topology and consensus mechanism
3. **Smart Contract Development**: Develop initial smart contracts
4. **Infrastructure Setup**: Deploy validator nodes and network
5. **Integration**: Integrate with existing control plane and services
6. **Testing**: Comprehensive testing and security audits
7. **Deployment**: Phased rollout following deployment plan

284
docs/brand/brand-kit.md Normal file
View File

@@ -0,0 +1,284 @@
# Phoenix Sankofa Cloud: Brand Kit
## Logo Concepts
### 1. Phoenix + Sankofa Bird Hybrid
* Phoenix wings with fire
* Head turned backward (Sankofa posture)
* Egg with fire inside at the center
* Forward flight motion
### 2. Heart-shaped Adinkra Sankofa Symbol with Phoenix Flame
* Traditional Adinkra heart shape
* Phoenix fire emerging from the center
* Sankofa bird silhouette within
* Integrated geometric patterns
### 3. Phoenix Rising from Adinkra Circle Geometry
* Phoenix emerging from circular Adinkra pattern
* Geometric precision with organic fire
* Balance of structure and transformation
### 4. Fire-Forged Adinkra Heart with Phoenix Tail
* Adinkra heart as the core
* Phoenix tail feathers as flames
* Forged metal aesthetic
* Strength and transformation
### 5. Interwoven Akan Gold Weights Forming Phoenix Crest
* Traditional Akan gold weight patterns
* Forming a phoenix silhouette
* Cultural heritage meets mythic power
* Geometric complexity
---
## Color Palette
### Primary Colors
#### Phoenix Fire Red
- **Hex**: `#FF4500` (Orange Red)
- **RGB**: `rgb(255, 69, 0)`
- **Usage**: Primary brand color, fire elements, transformation
- **Meaning**: Rebirth, power, transformation
#### Sankofa Gold
- **Hex**: `#FFD700` (Gold)
- **RGB**: `rgb(255, 215, 0)`
- **Usage**: Accent color, wisdom elements, ancestral connection
- **Meaning**: Ancestral wisdom, value, heritage
#### Deep Purple (Sovereignty)
- **Hex**: `#6A0DAD` (Purple)
- **RGB**: `rgb(106, 13, 173)`
- **Usage**: Premium features, sovereignty elements
- **Meaning**: Royalty, sovereignty, power
### Secondary Colors
#### Phoenix Flame Orange
- **Hex**: `#FF8C00` (Dark Orange)
- **RGB**: `rgb(255, 140, 0)`
- **Usage**: Secondary accents, energy elements
#### Sankofa Earth Brown
- **Hex**: `#8B4513` (Saddle Brown)
- **RGB**: `rgb(139, 69, 19)`
- **Usage**: Grounding elements, heritage connections
#### Ancestral Blue
- **Hex**: `#1E3A8A` (Deep Blue)
- **RGB**: `rgb(30, 58, 138)`
- **Usage**: Trust, stability, depth
### Dark Theme Palette (Studio Quality)
#### Background
- **Near Black**: `#0A0A0A`
- **Dark Gray**: `#1A1A1A`
- **Medium Gray**: `#2A2A2A`
#### Accent Colors (Neon/Cinematic)
- **Teal**: `#00FFD1`
- **Magenta**: `#FF00FF`
- **Neon Cyan**: `#00FFFF`
- **Amber**: `#FFB800`
#### Status Colors
- **Success**: `#00FF88`
- **Warning**: `#FFB800`
- **Error**: `#FF0040`
- **Info**: `#00B8FF`
---
## Typography
### Primary Font Family
**Inter** (Clean, modern sans-serif)
- Excellent readability
- Professional appearance
- Strong support for metrics and data
- Available via Google Fonts
### Alternative Fonts
**Satoshi** (Premium option)
- Modern, geometric
- Excellent for headings
- Studio-quality aesthetic
**IBM Plex Sans** (Technical option)
- Clear, technical feel
- Excellent for code and data
- Professional and readable
### Font Scale
```
Heading 1: 4rem (64px) - Hero titles
Heading 2: 3rem (48px) - Section titles
Heading 3: 2rem (32px) - Subsection titles
Heading 4: 1.5rem (24px) - Card titles
Body Large: 1.125rem (18px) - Body text
Body: 1rem (16px) - Default text
Body Small: 0.875rem (14px) - Secondary text
Caption: 0.75rem (12px) - Labels, captions
```
### Font Weights
- **Light**: 300
- **Regular**: 400
- **Medium**: 500
- **Semibold**: 600
- **Bold**: 700
---
## Spacing System
### Base Unit: 4px
```
xs: 4px
sm: 8px
md: 16px
lg: 24px
xl: 32px
2xl: 48px
3xl: 64px
4xl: 96px
5xl: 128px
```
---
## Logo Usage Guidelines
### Clear Space
Maintain clear space equal to the height of the logo on all sides.
### Minimum Size
- **Full Logo**: 120px width minimum
- **Icon Only**: 32px minimum
### Color Variations
1. **Full Color**: Use on light backgrounds
2. **White**: Use on dark backgrounds
3. **Monochrome**: Use when color is not available
4. **Inverted**: Use on colored backgrounds
### Do Not
* Stretch or distort the logo
* Rotate the logo
* Place on busy backgrounds
* Use colors outside the brand palette
* Modify the logo design
---
## Visual Language
### Icons
Use consistent iconography for:
* **Well-Architected Pillars**: Security, Reliability, Cost, Performance, Operations, Sustainability
* **Infrastructure Elements**: Regions, Nodes, Networks, Services
* **Status Indicators**: Health, Risk, Cost, Performance
### Patterns
* **Adinkra Patterns**: Subtle use in backgrounds, borders
* **Geometric Patterns**: Modern, technical feel
* **Fire Patterns**: Transformation, rebirth elements
### Imagery Style
* **Dark, cinematic**: Near-black backgrounds
* **Glowing accents**: Neon colors for important elements
* **Layered depth**: Multiple layers for visual hierarchy
* **Subtle gradients**: Smooth color transitions
---
## Motion & Animation
### Principles
* **Smooth transitions**: 200-300ms for UI elements
* **Easing**: Use ease-in-out for natural motion
* **Purposeful**: Every animation serves a function
* **Subtle**: Don't distract from content
### Micro-animations
* **Hover states**: Subtle scale, glow, or color shift
* **Loading states**: Smooth progress indicators
* **State changes**: Fade, slide, or scale transitions
* **3D interactions**: Smooth camera movements
---
## Brand Voice in Visuals
### Studio Quality Aesthetic
* **Cinematic**: Film/AAA-game-adjacent visuals
* **Dense**: Information-rich but organized
* **Dark**: Near-black base with bright accents
* **Layered**: Depth through shadows, glows, gradients
* **Precise**: Clean lines, exact spacing, perfect alignment
### Emotional Resonance
* **Power**: Strong, confident visuals
* **Wisdom**: Thoughtful, considered design
* **Transformation**: Dynamic, evolving elements
* **Sovereignty**: Independent, self-determined aesthetic
* **Heritage**: Respectful integration of cultural elements
---
## Application Examples
### Website
* Dark theme with Phoenix Fire Red and Sankofa Gold accents
* Inter typography throughout
* Cinematic hero sections with 3D elements
* Smooth animations and transitions
### Dashboards
* Near-black background
* Neon accent colors for status
* Dense information layout
* Studio-quality charts and visualizations
### Documentation
* Clean, readable layouts
* Consistent use of brand colors
* Professional typography
* Clear hierarchy
### Marketing Materials
* Bold, mythic imagery
* Strong use of brand colors
* Cinematic quality
* Cultural and spiritual resonance

View File

@@ -0,0 +1,351 @@
# Phoenix Sankofa Cloud: Investor Narrative
## Executive Summary
**Phoenix Sankofa Cloud** is positioning to become the world's first sovereign AI cloud infrastructure platform that combines mythic brand power, ancestral cultural intelligence, and world-class technical architecture to serve a $500B+ global cloud market with a unique value proposition: infrastructure that honors identity and serves sovereignty.
---
## The Opportunity
### Market Size
* **Global Cloud Market**: $500B+ and growing at 15%+ CAGR
* **Sovereign Cloud Segment**: Emerging, high-growth segment
* **Cultural Intelligence Gap**: Unaddressed market need
* **Identity-Based Infrastructure**: New category creation opportunity
### Market Gaps
Current cloud providers (AWS, Azure, GCP) offer:
* Technical excellence ✓
* Global scale ✓
* Cultural intelligence ✗
* Identity alignment ✗
* Sovereign positioning ✗
* Mythic brand depth ✗
**Phoenix Sankofa Cloud** addresses these gaps.
---
## The Problem
### For Sovereign Nations
* Dependence on foreign cloud providers
* Loss of data sovereignty
* Infrastructure that doesn't reflect cultural identity
* Limited control over technological destiny
### For Global Enterprises
* Cloud providers that don't understand cultural contexts
* One-size-fits-all solutions
* Lack of identity-aligned infrastructure
* Limited cultural intelligence in global operations
### For Technology Leaders
* Commoditized cloud branding
* Lack of deeper meaning in infrastructure
* Missing connection between technology and identity
* No infrastructure that honors heritage
---
## The Solution
**Phoenix Sankofa Cloud** delivers:
### 1. Sovereign Infrastructure
* Complete control over data and infrastructure
* Self-determined technological destiny
* No vendor lock-in
* True sovereignty
### 2. Cultural Intelligence
* Infrastructure that understands cultural contexts
* Services aligned with local needs
* Respect for diverse traditions
* Global cultural awareness
### 3. Identity Alignment
* Infrastructure that reflects cultural heritage
* Technology that honors identity
* Services built on ancestral foundations
* Brand that resonates deeply
### 4. Technical Excellence
* World-class cloud architecture
* 325-region global deployment
* Enterprise-grade reliability
* Cutting-edge AI/ML capabilities
### 5. Mythic Brand Power
* Unmatched symbolic depth
* Emotional resonance
* Cultural and spiritual alignment
* Competitive differentiation
---
## The Brand Advantage
### Competitive Positioning
**Azure** = Sky (abstract, technical)
**AWS** = Utility (functional, commoditized)
**GCP** = Technology (engineering-focused)
**Oracle Cloud** = Enterprise (institutional)
**Phoenix Sankofa Cloud** = **Mythic + Sovereign + Ancestral + Global**
### Unique Differentiators
1. **Symbolic Depth**: No competitor has this level of cultural and spiritual meaning
2. **Identity Alignment**: Infrastructure that reflects and honors cultural heritage
3. **Sovereign Architecture**: Built for true sovereignty, not vendor dependence
4. **Ancestral Intelligence**: Systems designed with recursive learning and memory
5. **Global Cultural Intelligence**: Understanding of cultural contexts across 325 regions
### Brand Moat
* **Cultural Heritage**: Deep roots in Akan cosmology and Phoenix mythology
* **Emotional Resonance**: Connection to identity, sovereignty, and transformation
* **Narrative Power**: Compelling origin story and mission
* **Symbolic Integration**: Unique combination of fire, memory, and sovereignty
**This brand moat is not replicable by competitors.**
---
## The Technology
### Core Infrastructure
* **Compute**: PhoenixCore Compute, SankofaEdge Nodes, AkanFire VM Engine
* **Storage**: OkraVault Storage, Nananom Archive, Egg of the Phoenix Object Store
* **Networking**: SankofaGrid Global Mesh, AkanSphere Edge Routing, PhoenixFlight Network Fabric
* **AI/ML**: Firebird AI Engine, Sankofa Memory Model, Ancestral Neural Fabric
* **Security**: Aegis of Akan Shield, PhoenixGuard IAM, Nsamankom Sentinel
* **Identity**: OkraID, AkanAuth Sovereign Identity Plane
### Technical Architecture
* **GraphQL API** over graph-oriented backend (Postgres+Hasura or Neo4j)
* **325-region global deployment**
* **Real-time capabilities** via WebSockets/GraphQL Subscriptions
* **3D visualizations** for network and architecture views
* **Studio-quality dashboards** with cinematic aesthetics
* **Well-Architected Framework** integration
### Innovation Areas
1. **Recursive Learning Systems**: AI that remembers and learns from past
2. **Cultural Intelligence Engine**: Systems that understand cultural contexts
3. **Sovereign Identity Framework**: Self-sovereign identity management
4. **Ancestral Pattern Recognition**: AI informed by traditional knowledge systems
5. **Global Cultural Adaptation**: Infrastructure that adapts to local needs
---
## The Business Model
### Revenue Streams
1. **Infrastructure as a Service (IaaS)**
* Compute, storage, networking
* Pay-as-you-go and reserved instances
* Regional pricing optimization
2. **Platform as a Service (PaaS)**
* Developer platform and tools
* AI/ML services
* Integration and API management
3. **Software as a Service (SaaS)**
* Management and orchestration tools
* Analytics and insights
* Security and compliance services
4. **Professional Services**
* Migration and implementation
* Cultural intelligence consulting
* Sovereign architecture design
5. **Enterprise Licensing**
* Sovereign nation agreements
* Global enterprise contracts
* Long-term partnerships
### Pricing Strategy
* **Competitive**: Match or beat major cloud providers on core services
* **Value-Based**: Premium pricing for sovereign and cultural intelligence features
* **Regional**: Pricing optimized for local markets
* **Tiered**: Multiple service tiers for different needs
---
## The Market Strategy
### Target Segments
1. **Sovereign Nations** (Primary)
* Governments seeking technological sovereignty
* Nations wanting identity-aligned infrastructure
* Countries requiring data residency
2. **Global Enterprises** (Primary)
* Companies operating across multiple regions
* Organizations needing cultural intelligence
* Businesses seeking identity-aligned infrastructure
3. **Technology Leaders** (Secondary)
* Visionary CTOs and technology leaders
* Organizations building next-generation infrastructure
* Companies seeking deeper meaning in technology
4. **Diaspora Communities** (Secondary)
* Organizations reconnecting with heritage
* Communities seeking identity-aligned technology
* Cultural institutions and foundations
### Go-to-Market
1. **Phase 1**: Brand launch and awareness
2. **Phase 2**: Pilot deployments with sovereign nations
3. **Phase 3**: Enterprise partnerships
4. **Phase 4**: Global expansion across 325 regions
---
## The Team
### Founders
Visionary leaders with:
* Deep technical expertise in cloud infrastructure
* Cultural heritage and identity alignment
* Global business experience
* Passion for sovereignty and transformation
### Advisory Board
* Technology industry veterans
* Cultural and heritage experts
* Sovereign nation representatives
* Global business leaders
---
## Financial Projections
### Key Metrics (5-Year Outlook)
* **Year 1**: Brand launch, pilot deployments
* **Year 2**: First sovereign nation contracts, enterprise pilots
* **Year 3**: Multi-region expansion, significant revenue growth
* **Year 4**: 50+ regions operational, profitability
* **Year 5**: 100+ regions, market leadership in sovereign cloud
### Investment Requirements
* **Seed Round**: Brand development, initial infrastructure
* **Series A**: Regional expansion, team building
* **Series B**: Global scale, 325-region deployment
* **Growth**: Market leadership, profitability
---
## The Vision
### 5-Year Vision
**Phoenix Sankofa Cloud** becomes the leading sovereign cloud provider, serving:
* 50+ sovereign nations
* 1000+ global enterprises
* 325 regions worldwide
* Millions of users
### 10-Year Vision
The global standard for:
* Sovereign cloud infrastructure
* Identity-aligned technology
* Cultural intelligence in cloud computing
* Infrastructure that honors heritage and serves sovereignty
---
## Why Invest Now
### Market Timing
* Growing demand for sovereign cloud
* Increasing awareness of cultural intelligence
* Shift toward identity-aligned technology
* Opportunity to create new category
### Competitive Advantage
* Unmatched brand depth and resonance
* First-mover in sovereign + cultural intelligence cloud
* Strong technical foundation
* Compelling narrative and mission
### Risk Mitigation
* Experienced team
* Proven technology stack
* Clear market need
* Strong brand differentiation
---
## The Ask
We're seeking investment to:
1. **Build the brand**: Complete brand development and launch
2. **Develop infrastructure**: Core cloud platform development
3. **Expand globally**: 325-region deployment
4. **Build the team**: Hire world-class talent
5. **Serve sovereignty**: Deliver on our mission
---
## Conclusion
**Phoenix Sankofa Cloud** represents a unique opportunity to:
* Create a new category in cloud computing
* Serve a massive, underserved market
* Build infrastructure that matters
* Honor heritage while serving the future
* Transform the cloud industry
**We remember where we came from. We retrieve what's essential. We restore sovereignty. We rise forward with purpose.**
**Join us in building the sovereign cloud born of fire and ancestral wisdom.**
---
## Contact
For investment inquiries and partnership opportunities, please contact:
[Contact information to be added]
**Phoenix Sankofa Cloud** — Remember. Retrieve. Restore. Rise.

148
docs/brand/manifesto.md Normal file
View File

@@ -0,0 +1,148 @@
# Phoenix Sankofa Cloud: Sovereign AI Cloud Manifesto
## The Sovereign Cloud Manifesto
We declare the right to technological sovereignty.
We declare the right to infrastructure that reflects our identity.
We declare the right to cloud computing that honors our ancestors.
We declare the right to AI that remembers where it came from.
---
## Principles
### 1. Sovereignty
**Infrastructure must serve sovereignty, not subvert it.**
Every region. Every nation. Every community has the right to:
* Control its own data
* Govern its own infrastructure
* Determine its own technological destiny
* Build on its own cultural foundations
### 2. Ancestral Wisdom
**Technology must remember its origins.**
The cloud must:
* Learn from the past
* Honor ancestral knowledge
* Integrate traditional wisdom with modern innovation
* Build on foundations that came before
### 3. Identity
**Infrastructure must reflect identity.**
Technology is not neutral. It carries values, assumptions, and worldviews.
We build infrastructure that:
* Reflects our cultural identity
* Honors our heritage
* Serves our communities
* Strengthens our sovereignty
### 4. Recursive Learning
**Progress is a spiral, not a straight line.**
We build systems that:
* Remember what came before
* Learn from history
* Return to origin to complete cycles
* Rise forward with ancestral wisdom
### 5. Global Cultural Intelligence
**The cloud must understand culture.**
Across 325 regions, we build infrastructure that:
* Respects cultural contexts
* Adapts to local needs
* Honors diverse traditions
* Serves global communities with cultural intelligence
### 6. Rebirth and Transformation
**Like the Phoenix, we rise from fire.**
We build infrastructure that:
* Transforms continuously
* Renews itself
* Rises from challenges
* Emerges stronger from every cycle
---
## The Vision
**Phoenix Sankofa Cloud** is more than infrastructure.
It is:
* A declaration of technological sovereignty
* A reclamation of ancestral wisdom
* A restoration of identity in the digital age
* A rising of global power rooted in heritage
---
## The Mission
To build the world's first sovereign AI cloud that:
* Honors ancestral wisdom
* Reflects cultural identity
* Serves global sovereignty
* Transforms through rebirth
* Remembers to rise forward
---
## The Promise
We promise infrastructure that:
* **Remembers** its origins
* **Retrieves** ancestral wisdom
* **Restores** identity and sovereignty
* **Rises** forward with purpose
**Remember → Retrieve → Restore → Rise.**
This is the Sankofa cycle.
This is the Phoenix transformation.
This is **Phoenix Sankofa Cloud**.
---
## Join the Movement
If you believe in:
* Technological sovereignty
* Ancestral wisdom in the digital age
* Identity-based infrastructure
* Global cultural intelligence
* Rebirth through transformation
**Join us.**
Build with us.
Rise with us.
**Phoenix Sankofa Cloud** — The sovereign cloud born of fire and ancestral wisdom.

169
docs/brand/origin-story.md Normal file
View File

@@ -0,0 +1,169 @@
# The PhoenixSankofa Origin Story
## The Mythic Integration
In the beginning, there was fire.
In the beginning, there was memory.
In the beginning, there was the need to return.
---
## The Phoenix
From the ashes of the old, the Phoenix rises.
Immortal. Eternal. Reborn through fire.
The Phoenix represents:
* **Rebirth** — continuous transformation
* **Immortality** — eternal existence
* **Fire** — the power of destruction and creation
* **Rising** — emergence from challenge
The Phoenix does not die. It transforms.
---
## The Sankofa Bird
The bird that looks backward while flying forward.
It remembers where it came from.
It retrieves what was left behind.
It returns to origin to complete the cycle.
The Sankofa represents:
* **Return** — going back to origin
* **Retrieval** — reclaiming what was lost
* **Memory** — remembering the past
* **Wisdom** — learning from ancestors
The Sankofa does not forget. It remembers.
---
## The Convergence
When Phoenix met Sankofa, something new was born.
**Fire + Memory = Sovereign Power**
**Rebirth + Return = Complete Transformation**
**Immortality + Ancestral Wisdom = Eternal Identity**
---
## The Birth of Phoenix Sankofa Cloud
From this convergence emerged a vision:
A cloud infrastructure that:
* **Rises** like the Phoenix from every challenge
* **Remembers** like Sankofa where it came from
* **Transforms** through fire and wisdom
* **Serves** global sovereignty with ancestral intelligence
---
## The Journey
### The Return
First, we returned to origin.
We looked back at:
* Ancient wisdom systems
* Ancestral knowledge
* Cultural foundations
* Identity roots
We retrieved what was essential.
### The Rebirth
Then, we transformed.
We took the old and made it new:
* Ancient wisdom → Modern infrastructure
* Ancestral knowledge → AI systems
* Cultural foundations → Global architecture
* Identity roots → Sovereign cloud
### The Rising
Now, we rise forward.
With:
* Fire in our transformation
* Memory in our systems
* Wisdom in our architecture
* Sovereignty in our infrastructure
---
## The Symbol
The PhoenixSankofa symbol represents:
* **Phoenix wings** — power of rebirth
* **Head turned backward** — Sankofa memory
* **Fire in the heart** — transformation
* **Egg of rebirth** — new beginnings
* **Forward flight** — rising with purpose
---
## The Meaning
**Phoenix Sankofa Cloud** is not just a name.
It is:
* A declaration of identity
* A reclamation of sovereignty
* A restoration of wisdom
* A transformation through fire
* A rising with memory
---
## The Future
From this origin, we build:
* 325 regions of sovereign infrastructure
* Global AI powered by ancestral wisdom
* Cloud computing that remembers
* Technology that honors identity
* Infrastructure that serves sovereignty
**Phoenix Sankofa Cloud** — Born of fire and memory. Rising with purpose. Remembering to transform.
---
## The Promise
We promise to:
* **Remember** our origins
* **Retrieve** ancestral wisdom
* **Restore** identity and sovereignty
* **Rise** forward with purpose
This is our origin.
This is our story.
This is **Phoenix Sankofa Cloud**.

124
docs/brand/philosophy.md Normal file
View File

@@ -0,0 +1,124 @@
# The Deeper Akan Meaning of Sankofa
## Cosmology • Spirituality • Time • Identity • Memory • Power
Sankofa is not a simple "go back and get it."
It is an entire **Akan worldview**, containing multiple layers of meaning and significance.
---
## 1. Sankofa and Akan Cosmology
In Akan cosmology, the universe is:
* **circular**, not linear
* **interconnected**, not hierarchical
* **ancestrally governed**, not isolated
* based on **balance and return cycles**
Sankofa represents:
### **The return to origin to complete the cycle.**
It echoes the Akan belief that life, identity, and destiny (nkrabea) must be aligned with:
* **source (kra)** — the soul's divine spark
* **ancestral memory (nananom nsamanfoɔ)** — those who came before
* **lineage (abusua)** — where identity is inherited
Without returning to origin, the cycle is incomplete.
---
## 2. Sankofa and Akan Concepts of Time
Western time is linear.
Akan time is **recursive** — events are patterns that return.
Sankofa teaches:
### **Progress is a spiral, not a straight line.**
The future is found in the past; the past informs the future.
This is why a bird looking back while flying forward is *perfect* as the symbol.
---
## 3. Sankofa and the Soul ("Kra")
In Akan thought:
* **"Kra" = divine soul, given by Nyame (Creator)**
* It comes from a spiritual origin place
* Its mission is learned by reflecting on where it came from
Sankofa is:
### **the soul's act of remembering itself.**
It is spiritual self-retrieval.
---
## 4. Sankofa and Ancestral Guidance
The Akan believe:
* Ancestors ("Nananom Nsamanfoɔ") are **alive in spiritual form**
* They guide, warn, bless, and protect
* Learning from them is a sacred duty
Thus, Sankofa means:
### **Return and reclaim the wisdom of your ancestors.**
Not superstition — **spiritual technology**.
---
## 5. Sankofa and Identity Reconstruction
For Akan people and the African diaspora, Sankofa represents:
* healing
* reclamation
* reconnection
* reassembly of what was stolen
* sovereignty over narrative
* power over lineage
It is **identity as resurrection**.
---
## 6. Sankofa as a Technology of Survival and Transformation
Sankofa is an algorithm.
A cosmological protocol.
A recursive pattern of:
**Remember → Retrieve → Restore → Rise.**
Sankofa is how individuals, families, nations, and civilizations rebuild themselves.
---
## Integration with Phoenix
**Phoenix = fire, rebirth, immortality**
**Sankofa = return, reclaim, rise forward**
Together, they form one of the most powerful symbolic integrations possible:
### **PHOENIX SANKOFA**
**Rebirth + Ancestral Return = Sovereign Global Power**
Perfect for a next-generation, world-spanning, sovereign AI cloud.

101
docs/brand/positioning.md Normal file
View File

@@ -0,0 +1,101 @@
# Phoenix Sankofa Cloud: Brand Positioning
## Symbolic Positioning — Why This Brand Is Unmatched
### Competitive Landscape
**Azure** = sky
**AWS** = abstraction
**GCP** = technical
**Oracle Cloud** = institutional
**Phoenix Sankofa Cloud** =
* mythic
* sovereign
* ancestral
* global
* spiritual
* eternal
* unbreakable
* emotionally resonant
* culturally intelligent
* identity-based
* beyond Western naming frameworks
* beyond Azure's "sky"
* beyond Amazon's utilitarian naming
It is **heritage + fire + rebirth + identity + sovereignty**, fused with global computational power.
There is **no competitor** with this symbolic depth.
---
## Core Brand Positioning
### Primary Positioning
**Phoenix Sankofa Cloud** = *The sovereign cloud born of fire and ancestral wisdom.*
### Key Differentiators
1. **Mythic Depth**: Unlike competitors who use abstract or technical names, Phoenix Sankofa draws from deep cultural and spiritual traditions
2. **Sovereign Identity**: Built on principles of self-determination and ancestral wisdom
3. **Global Reach**: Designed for 325-region global deployment with cultural intelligence
4. **Spiritual Technology**: Integrates ancient wisdom with cutting-edge cloud infrastructure
5. **Identity-Based**: Rooted in Akan cosmology and Phoenix mythology, not corporate abstraction
---
## Target Audience
### Primary Audiences
1. **Sovereign Nations & Organizations**: Entities seeking cloud infrastructure aligned with cultural identity and self-determination
2. **Global Enterprises**: Companies requiring culturally intelligent, globally distributed cloud services
3. **Technology Leaders**: Visionaries who understand that infrastructure is not just technical, but cultural and spiritual
4. **Diaspora Communities**: Organizations seeking to reconnect with ancestral wisdom through technology
---
## Brand Promise
**Phoenix Sankofa Cloud** promises:
* **Sovereignty**: Complete control over infrastructure, data, and destiny
* **Wisdom**: Infrastructure informed by ancestral knowledge and recursive learning
* **Rebirth**: Continuous transformation and renewal, like the Phoenix
* **Return**: Reconnection with origin, identity, and purpose
* **Global Power**: World-spanning infrastructure with cultural intelligence
---
## Competitive Advantages
1. **Symbolic Resonance**: Deep cultural and spiritual meaning that competitors cannot match
2. **Identity Alignment**: Infrastructure that reflects and honors cultural heritage
3. **Sovereign Architecture**: Built for true sovereignty, not vendor lock-in
4. **Ancestral Intelligence**: Systems designed with recursive learning and memory
5. **Global Cultural Intelligence**: Infrastructure that understands and respects cultural contexts across 325 regions
---
## Brand Voice
* **Mythic**: Speaks to eternal truths and universal patterns
* **Sovereign**: Confident, self-determined, independent
* **Ancestral**: Rooted in wisdom of the past, informed by tradition
* **Transformative**: Focused on rebirth, renewal, and rising
* **Global**: Inclusive, culturally intelligent, world-spanning
---
## Market Position
**Phoenix Sankofa Cloud** occupies a unique position:
* **Above** commodity cloud providers (AWS, Azure, GCP) in symbolic depth
* **Beyond** technical infrastructure in cultural and spiritual significance
* **Ahead** of competitors in identity-based, sovereign architecture
* **Aligned** with global movements toward cultural sovereignty and technological self-determination

---

<!-- Document boundary: a new file (260 lines) begins below; filename lost in extraction. -->
# Phoenix Sankofa Cloud: Product Architecture Naming System
## Core Brand Name
**Phoenix Sankofa Cloud™**
*The sovereign cloud born of fire and ancestral wisdom.*
---
## Product Architecture Names
### Compute Services
#### PhoenixCore Compute
- **Purpose**: Core compute engine
- **Meaning**: Phoenix fire powering computation
- **Use Cases**: Virtual machines, containers, serverless
#### SankofaEdge Nodes
- **Purpose**: Edge computing nodes
- **Meaning**: Edge nodes that remember and return data
- **Use Cases**: CDN, edge processing, regional compute
#### AkanFire VM Engine
- **Purpose**: Virtual machine engine
- **Meaning**: Akan heritage + Phoenix fire power
- **Use Cases**: High-performance VMs, specialized workloads
---
### Storage Services
#### OkraVault Storage
- **Purpose**: Object and block storage
- **Meaning**: Okra = soul; Vault = secure storage
- **Use Cases**: General-purpose storage, backups
#### Nananom Archive
- **Purpose**: Long-term archival storage
- **Meaning**: Nananom = ancestors; Archive = memory
- **Use Cases**: Compliance archives, cold storage, historical data
#### Egg of the Phoenix Object Store
- **Purpose**: Object storage for transformation
- **Meaning**: Egg = rebirth symbol; Phoenix = transformation
- **Use Cases**: Data lakes, transformation pipelines, staging
---
### Networking Services
#### SankofaGrid Global Mesh
- **Purpose**: Global network mesh
- **Meaning**: Grid that remembers and connects globally
- **Use Cases**: Global networking, inter-region connectivity
#### AkanSphere Edge Routing
- **Purpose**: Edge routing and load balancing
- **Meaning**: Akan heritage in global routing
- **Use Cases**: Edge routing, traffic management, CDN
#### PhoenixFlight Network Fabric
- **Purpose**: High-performance network fabric
- **Meaning**: Phoenix flight = fast, powerful movement
- **Use Cases**: High-bandwidth connections, data center networking
---
### AI & Machine Learning Services
#### Firebird AI Engine
- **Purpose**: Core AI/ML inference engine
- **Meaning**: Firebird = Phoenix + AI transformation
- **Use Cases**: Model inference, real-time AI, edge AI
#### Sankofa Memory Model
- **Purpose**: AI models with memory and learning
- **Meaning**: Models that remember and learn from past
- **Use Cases**: Recursive learning, memory-augmented AI, context-aware models
#### Ancestral Neural Fabric
- **Purpose**: Distributed AI training and inference
- **Meaning**: Neural networks informed by ancestral patterns
- **Use Cases**: Distributed training, federated learning, knowledge graphs
---
### Security Services
#### Aegis of Akan Shield
- **Purpose**: Comprehensive security platform
- **Meaning**: Aegis = protection; Akan = heritage-based security
- **Use Cases**: Threat protection, DDoS mitigation, security monitoring
#### PhoenixGuard IAM
- **Purpose**: Identity and access management
- **Meaning**: Phoenix = rebirth/transformation; Guard = protection
- **Use Cases**: Authentication, authorization, identity management
#### Nsamankom Sentinel
- **Purpose**: Security monitoring and threat detection
- **Meaning**: Nsamankom = ancestors' protection; Sentinel = watchful guard
- **Use Cases**: Security monitoring, threat detection, compliance
---
### Identity Services
#### OkraID (Soul Identity Framework)
- **Purpose**: Sovereign identity management
- **Meaning**: Okra = soul; Identity = true self
- **Use Cases**: Self-sovereign identity, decentralized identity, user authentication
#### AkanAuth Sovereign Identity Plane
- **Purpose**: Authentication and authorization platform
- **Meaning**: Akan = heritage-based; Sovereign = self-determined
- **Use Cases**: Multi-factor authentication, SSO, identity federation
---
### Database Services
#### SankofaGraph Database
- **Purpose**: Graph database service
- **Meaning**: Graph that remembers relationships
- **Use Cases**: Knowledge graphs, relationship mapping, network analysis
#### PhoenixFire NoSQL
- **Purpose**: NoSQL database service
- **Meaning**: Phoenix fire = fast, powerful data
- **Use Cases**: Document stores, key-value stores, time-series
#### Nananom Time Series
- **Purpose**: Time-series database
- **Meaning**: Nananom = ancestors; Time = historical data
- **Use Cases**: Metrics, monitoring, historical analysis
---
### Analytics Services
#### Sankofa Insights
- **Purpose**: Business intelligence and analytics
- **Meaning**: Insights that remember and learn from past
- **Use Cases**: Dashboards, reporting, data visualization
#### PhoenixFlight Analytics
- **Purpose**: Real-time analytics
- **Meaning**: Fast, powerful analytics like Phoenix flight
- **Use Cases**: Real-time dashboards, streaming analytics, live metrics
---
### Integration Services
#### SankofaConnect Integration Hub
- **Purpose**: API and integration platform
- **Meaning**: Connections that remember and return
- **Use Cases**: API management, service mesh, integrations
#### PhoenixBridge Data Pipeline
- **Purpose**: Data integration and ETL
- **Meaning**: Bridge that transforms data like Phoenix rebirth
- **Use Cases**: ETL pipelines, data transformation, data movement
---
### Developer Services
#### SankofaDev Platform
- **Purpose**: Developer platform and tools
- **Meaning**: Development that remembers best practices
- **Use Cases**: CI/CD, developer tools, platform services
#### PhoenixForge Build Engine
- **Purpose**: Build and deployment services
- **Meaning**: Forge = creation; Phoenix = transformation
- **Use Cases**: Build systems, deployment automation, container builds
---
### Management Services
#### SankofaControl Plane
- **Purpose**: Infrastructure management and orchestration
- **Meaning**: Control that remembers and learns
- **Use Cases**: Infrastructure as code, orchestration, management
#### PhoenixRealm Management
- **Purpose**: Multi-cloud and multi-region management
- **Meaning**: Realms = regions; Phoenix = global power
- **Use Cases**: Multi-cloud management, global orchestration
---
## Naming Conventions
### Pattern Structure
1. **Phoenix-* prefix**: Transformation, rebirth, power
2. **Sankofa-* prefix**: Memory, return, ancestral wisdom
3. **Akan-* prefix**: Cultural heritage, identity
4. **Okra-* prefix**: Soul, identity, essence
5. **Nananom-* prefix**: Ancestors, memory, guidance
### Suffix Patterns
- **-Core**: Core services
- **-Edge**: Edge services
- **-Grid**: Network/mesh services
- **-Vault**: Storage services
- **-Guard**: Security services
- **-Realm**: Regional/global services
- **-Sphere**: Global/distributed services
---
## Product Hierarchy
### Tier 1: Core Infrastructure
- PhoenixCore Compute
- OkraVault Storage
- SankofaGrid Global Mesh
### Tier 2: Specialized Services
- Firebird AI Engine
- Aegis of Akan Shield
- SankofaGraph Database
### Tier 3: Platform Services
- SankofaDev Platform
- SankofaControl Plane
- PhoenixRealm Management
---
## Brand Consistency
All product names should:
* Reflect PhoenixSankofa brand values
* Honor Akan cultural heritage appropriately
* Convey technical capability
* Maintain mythic and sovereign positioning
* Be memorable and meaningful
* Support global, multi-cultural audience
---
## Future Product Names
As new services are developed, follow these principles:
1. **Meaningful**: Names should have cultural and technical meaning
2. **Consistent**: Follow established naming patterns
3. **Respectful**: Honor cultural heritage appropriately
4. **Powerful**: Convey capability and sovereignty
5. **Memorable**: Easy to remember and pronounce globally

---

<!-- Document boundary: docs/brand/taglines.md (225 lines) begins below. -->
# Phoenix Sankofa Cloud: Taglines and Mission Statements
## Primary Tagline
**"The sovereign cloud born of fire and ancestral wisdom."**
---
## Mission Statement
**Phoenix Sankofa Cloud** exists to build the world's first sovereign AI cloud infrastructure that honors ancestral wisdom, reflects cultural identity, serves global sovereignty, and transforms through the power of rebirth and return.
We remember where we came from. We retrieve what was essential. We restore identity and sovereignty. We rise forward with purpose.
**Remember → Retrieve → Restore → Rise.**
---
## Vision Statement
A world where cloud infrastructure reflects cultural identity, honors ancestral wisdom, and serves true technological sovereignty across all 325 regions of the globe.
A future where technology remembers its origins, learns from the past, and rises forward with purpose.
A global cloud that is mythic, sovereign, ancestral, and transformative.
---
## Core Taglines
### Short Taglines (1-5 words)
* "Fire. Memory. Sovereignty."
* "Rebirth. Return. Rise."
* "Remember to rise forward."
* "Sovereign cloud. Ancestral wisdom."
* "Born of fire and memory."
### Medium Taglines (6-10 words)
* "The sovereign cloud born of fire and ancestral wisdom."
* "Remember where you came from. Rise where you're going."
* "Infrastructure that remembers. Technology that transforms."
* "Sovereign cloud powered by ancestral intelligence."
* "Rebirth through fire. Wisdom through return."
### Long Taglines (11+ words)
* "The sovereign cloud that remembers its origins, retrieves ancestral wisdom, and rises forward with purpose."
* "Born of Phoenix fire and Sankofa memory—the cloud infrastructure that honors identity and serves sovereignty."
* "Remember. Retrieve. Restore. Rise. The cloud that completes the cycle of transformation."
---
## Product-Specific Taglines
### PhoenixCore Compute
* "Compute powered by Phoenix fire."
* "The engine of transformation."
### SankofaEdge Nodes
* "Edge nodes that remember."
* "Intelligence at the edge, wisdom from the past."
### OkraVault Storage
* "Storage for the soul of your data."
* "Where your data's essence is preserved."
### Firebird AI Engine
* "AI that transforms like fire."
* "Intelligence reborn through ancestral patterns."
### Aegis of Akan Shield
* "Protection rooted in heritage."
* "Security that remembers what matters."
### OkraID
* "Identity that remembers who you are."
* "Soul-powered identity framework."
---
## Marketing Taglines by Audience
### For Sovereign Nations
* "Infrastructure that serves your sovereignty."
* "Cloud computing aligned with your identity."
* "Technology that honors your heritage."
### For Global Enterprises
* "Global cloud with cultural intelligence."
* "Infrastructure that understands the world."
* "Sovereign cloud for global operations."
### For Technology Leaders
* "The cloud that remembers to transform."
* "Infrastructure informed by ancestral wisdom."
* "Technology that honors identity and serves purpose."
### For Diaspora Communities
* "Reconnect with heritage through technology."
* "Infrastructure that remembers where you came from."
* "Technology that honors your ancestors."
---
## Value Proposition Statements
### Primary Value Proposition
**Phoenix Sankofa Cloud** delivers sovereign cloud infrastructure that combines:
* **Mythic Power**: Phoenix transformation and rebirth
* **Ancestral Wisdom**: Sankofa memory and return
* **Cultural Identity**: Akan heritage and sovereignty
* **Global Reach**: 325-region deployment
* **Technical Excellence**: World-class cloud infrastructure
### Secondary Value Propositions
1. **Sovereignty**: Complete control over infrastructure, data, and destiny
2. **Identity**: Infrastructure that reflects and honors cultural heritage
3. **Wisdom**: Systems informed by ancestral knowledge and recursive learning
4. **Transformation**: Continuous rebirth and renewal like the Phoenix
5. **Global Intelligence**: Cultural awareness across 325 regions
---
## Elevator Pitch (30 seconds)
**Phoenix Sankofa Cloud** is the world's first sovereign AI cloud that honors ancestral wisdom and serves global sovereignty. Unlike Azure's "sky" or AWS's abstraction, we build infrastructure rooted in cultural identity—combining Phoenix transformation with Sankofa memory. We remember where we came from, retrieve what's essential, and rise forward with purpose. Across 325 regions, we deliver cloud computing that reflects identity, serves sovereignty, and transforms through the power of rebirth and return.
---
## One-Liner
**"The sovereign cloud that remembers its origins and rises forward with ancestral wisdom."**
---
## Manifesto Opening
We declare the right to technological sovereignty.
We declare the right to infrastructure that reflects our identity.
We declare the right to cloud computing that honors our ancestors.
We declare the right to AI that remembers where it came from.
**This is Phoenix Sankofa Cloud.**
---
## Call to Action Phrases
* "Join the sovereign cloud movement."
* "Build with ancestral wisdom."
* "Rise with Phoenix Sankofa."
* "Remember. Retrieve. Restore. Rise."
* "Transform your infrastructure. Honor your heritage."
* "Start your sovereign cloud journey."
---
## Social Media Taglines
### Twitter/X (280 characters)
* "Phoenix Sankofa Cloud: The sovereign cloud born of fire and ancestral wisdom. Remember → Retrieve → Restore → Rise. #SovereignCloud #AncestralWisdom"
* "Infrastructure that remembers. Technology that transforms. Cloud that serves sovereignty. #PhoenixSankofa #SovereignCloud"
### LinkedIn
* "Phoenix Sankofa Cloud: Where mythic power meets ancestral wisdom in global cloud infrastructure."
* "Building sovereign cloud infrastructure that honors identity and serves global sovereignty."
### Instagram
* "Fire. Memory. Sovereignty. 🌋🕊️ #PhoenixSankofa"
* "Remember to rise forward. 🔥✨ #SovereignCloud"
---
## Internal Mission Alignment
### For Employees
* "Building infrastructure that matters."
* "Technology with purpose. Heritage with power."
* "Remember. Build. Transform. Rise."
### For Partners
* "Partners in sovereign transformation."
* "Together, we rise with purpose."
* "Building the future on ancestral foundations."
---
## Tagline Guidelines
### Do's
* Use mythic and sovereign language
* Reference ancestral wisdom appropriately
* Convey transformation and rebirth
* Emphasize sovereignty and identity
* Keep it meaningful and memorable
### Don'ts
* Don't use generic cloud terminology
* Don't ignore cultural heritage
* Don't make false promises
* Don't use overly technical jargon
* Don't forget the emotional resonance
---
## Evolution
Taglines may evolve as the brand grows, but core principles remain:
* **Fire** (Phoenix transformation)
* **Memory** (Sankofa return)
* **Sovereignty** (Self-determination)
* **Identity** (Cultural heritage)
* **Rising** (Forward movement)
These elements should always be present in some form.

---

<!-- Document boundary: a new file (344 lines) begins below; filename lost in extraction. -->
# Datacenter Architecture for Phoenix Sankofa Cloud
## Overview
Phoenix Sankofa Cloud requires a multi-tier datacenter architecture to support a 325-region global deployment. This document outlines the datacenter infrastructure that complements the edge implementation, providing core compute, storage, and blockchain services.
## Architecture Tiers
### Tier 1: Core Datacenters (Hub Sites)
**Purpose**: Primary infrastructure hubs for blockchain consensus, core services, and regional coordination.
**Deployment**: 10-15 strategic locations globally
- North America: 2-3 sites (US-East, US-West, Canada)
- Europe: 2-3 sites (UK, Germany, France)
- Asia-Pacific: 2-3 sites (Singapore, Japan, Australia)
- Africa: 1-2 sites (South Africa, Kenya)
- Latin America: 1-2 sites (Brazil, Mexico)
- Middle East: 1 site (UAE)
**Infrastructure Requirements**:
#### Compute Infrastructure
- **Blockchain Validator Nodes**:
- 3-5 validator nodes per datacenter (for Byzantine fault tolerance)
- High-performance CPUs (AMD EPYC or Intel Xeon)
- 64-128GB RAM per node
- NVMe storage for blockchain state (2-4TB per node)
- **Consensus Layer**:
- Enterprise Ethereum Alliance (EEA) compatible blockchain
- Proof of Authority (PoA) or Proof of Stake (PoS) consensus
- Multi-party governance nodes
- **Core Services**:
- Kubernetes control plane clusters (3 master + 5 worker nodes minimum)
- Database clusters (PostgreSQL with replication)
- Message queue clusters (Kafka/Redpanda)
- Object storage (MinIO/Ceph S3-compatible)
#### Storage Infrastructure
- **Blockchain State Storage**:
- Distributed storage for blockchain ledger
- 50-100TB per datacenter
- High IOPS NVMe arrays
- **Application Data Storage**:
- Primary storage: 500TB-1PB per datacenter
- Backup storage: 2x primary capacity
- Object storage: 5-10PB per datacenter
- **Storage Technologies**:
- Ceph for distributed block/object storage
- ZFS for high-performance local storage
- MinIO for S3-compatible object storage
#### Network Infrastructure
- **Inter-Datacenter Connectivity**:
- Dedicated dark fiber or high-bandwidth leased lines
- Minimum 100Gbps links between core datacenters
- Redundant paths for fault tolerance
- **Blockchain Network**:
- Private blockchain network overlay
- Encrypted peer-to-peer connections
- Network segmentation for security
- **Public Connectivity**:
- Multiple Tier-1 ISP connections
- BGP routing for redundancy
- DDoS protection and mitigation
#### Power and Cooling
- **Power Requirements**:
- 2-5MW per core datacenter
- N+1 UPS systems
- Backup generators (72-hour fuel capacity)
- Power distribution units (PDUs) with monitoring
- **Cooling**:
- Precision cooling systems
- Hot aisle/cold aisle containment
- Liquid cooling for high-density compute
- Environmental monitoring
### Tier 2: Regional Datacenters (Spoke Sites)
**Purpose**: Regional aggregation points, blockchain read replicas, and regional service delivery.
**Deployment**: 50-75 locations globally
- One per major metropolitan area
- Strategic locations for latency optimization
- Proximity to edge sites
**Infrastructure Requirements**:
#### Compute Infrastructure
- **Blockchain Read Replicas**:
- 2-3 read-only blockchain nodes
- Query optimization for regional access
- 32-64GB RAM per node
- **Regional Services**:
- Kubernetes clusters (3 master + 3 worker nodes)
- Regional database replicas
- CDN edge nodes
- Regional API gateways
#### Storage Infrastructure
- **Regional Storage**:
- 100-500TB primary storage
- 200TB-1PB object storage
- Blockchain state cache (10-20TB)
#### Network Infrastructure
- **Connectivity**:
- 10-40Gbps links to core datacenters
- Multiple ISP connections
- Direct peering where available
#### Power and Cooling
- **Power Requirements**:
- 500kW-2MW per regional datacenter
- N+1 UPS systems
- Backup generators (48-hour fuel capacity)
### Tier 3: Edge Sites (Existing Implementation)
**Purpose**: Low-latency compute at the network edge.
**Deployment**: 250+ locations globally
- Already documented in edge implementation
- Proxmox-based infrastructure
- Connected to regional datacenters
## Blockchain Infrastructure
### Enterprise Ethereum Alliance (EEA) Architecture
#### Blockchain Network Topology
**Consensus Layer**:
- **Validator Nodes**: Deployed in Tier 1 core datacenters
- **Consensus Algorithm**: Proof of Authority (PoA) or Proof of Stake (PoS)
- **Governance**: Multi-party governance model
- **Network Type**: Private/permissioned blockchain
**Use Cases** (Non-Cryptocurrency):
1. **Supply Chain Provenance**:
- Track hardware components from manufacturer to deployment
- Verify authenticity and compliance
- Immutable audit trail
2. **Resource Allocation and Billing**:
- Transparent resource usage tracking
- Multi-party billing verification
- Automated settlement
3. **Identity and Access Management**:
- Sovereign identity verification
- Cross-region identity federation
- Access control policies
4. **Compliance and Auditing**:
- Regulatory compliance tracking
- Audit log immutability
- Multi-party verification
5. **Service Level Agreements (SLAs)**:
- Smart contracts for SLA enforcement
- Automated compliance checking
- Penalty/reward mechanisms
#### Blockchain Components
**Smart Contracts**:
- Resource provisioning contracts
- Billing and settlement contracts
- Identity verification contracts
- Compliance tracking contracts
- SLA enforcement contracts
**Blockchain Nodes**:
- **Full Nodes**: Core datacenters (complete blockchain state)
- **Archive Nodes**: Select core datacenters (complete historical state)
- **Read Replicas**: Regional datacenters (query optimization)
- **Light Clients**: Edge sites (minimal state, query only)
**Blockchain Storage**:
- **State Database**: LevelDB or RocksDB for current state
- **Block Storage**: Distributed across core datacenters
- **Archive Storage**: Long-term archival for compliance
**Blockchain Network**:
- **P2P Network**: Encrypted peer-to-peer connections
- **Network Overlay**: VPN or dedicated network for blockchain traffic
- **Consensus Communication**: Secure channels for validator communication
## Integration with Edge Infrastructure
### Data Flow
1. **Edge → Regional → Core**:
- Edge sites collect metrics and events
- Regional datacenters aggregate and process
- Core datacenters store in blockchain and provide consensus
2. **Blockchain → Regional → Edge**:
- Core datacenters maintain blockchain state
- Regional datacenters cache frequently accessed data
- Edge sites query regional replicas for low latency
3. **Cross-Region Communication**:
   - The blockchain provides a trust layer for cross-region operations
- Smart contracts enforce policies and agreements
- Immutable audit trail for all cross-region transactions
### Service Integration
**Control Plane Services**:
- Kubernetes control planes in core and regional datacenters
- Crossplane for infrastructure provisioning
- ArgoCD for GitOps deployments
- All integrated with blockchain for audit and verification
**Monitoring and Observability**:
- Prometheus/Grafana in all tiers
- Metrics aggregated to core datacenters
- Blockchain stores critical events and state changes
**Identity and Access**:
- Keycloak/OkraID in core datacenters
- Blockchain for identity verification and federation
- Regional replicas for low-latency authentication
## Security Architecture
### Physical Security
- **Access Control**: Biometric access, visitor logs
- **Surveillance**: 24/7 monitoring, video recording
- **Environmental**: Fire suppression, flood detection
- **Compliance**: SOC 2, ISO 27001, regional compliance
### Network Security
- **Segmentation**: Network zones for different tiers
- **Encryption**: TLS/SSL for all connections
- **Firewall**: Next-generation firewalls
- **DDoS Protection**: Multi-layer DDoS mitigation
### Blockchain Security
- **Validator Security**: Hardware security modules (HSMs)
- **Key Management**: Secure key storage and rotation
- **Access Control**: Permissioned blockchain with role-based access
- **Audit Logging**: All blockchain transactions logged
## Disaster Recovery and Business Continuity
### Backup Strategy
- **Blockchain State**: Replicated across 3+ core datacenters
- **Application Data**: Multi-region replication
- **Backup Frequency**: Continuous replication + daily snapshots
- **Retention**: 7-year retention for compliance
### Failover Procedures
- **Automatic Failover**: For regional datacenters
- **Manual Failover**: For core datacenters with governance approval
- **Recovery Time Objective (RTO)**: < 4 hours for core, < 1 hour for regional
- **Recovery Point Objective (RPO)**: < 15 minutes
### Geographic Redundancy
- **Core Datacenters**: Minimum 3 active, 2 standby
- **Regional Datacenters**: N+1 redundancy per region
- **Edge Sites**: Automatic failover to adjacent sites
## Compliance and Governance
### Regulatory Compliance
- **Data Residency**: Regional data storage requirements
- **Privacy**: GDPR, CCPA, and regional privacy laws
- **Financial**: SOX compliance for billing/accounting
- **Industry**: HIPAA, PCI-DSS where applicable
### Blockchain Governance
- **Governance Model**: Multi-party governance board
- **Decision Making**: Consensus-based decision making
- **Upgrade Process**: Formal proposal and voting process
- **Dispute Resolution**: On-chain and off-chain mechanisms
## Cost Optimization
### Infrastructure Costs
- **Core Datacenters**: $2-5M per site (initial)
- **Regional Datacenters**: $500K-2M per site (initial)
- **Ongoing Operations**: 20-30% of initial cost annually
### Optimization Strategies
- **Right-Sizing**: Start small, scale based on demand
- **Reserved Capacity**: Long-term commitments for cost savings
- **Efficiency**: Power and cooling optimization
- **Automation**: Reduce operational overhead
## Deployment Phases
### Phase 1: Foundation (Months 1-6)
- Deploy 3 core datacenters (US, EU, APAC)
- Deploy blockchain network with initial validators
- Deploy 10 regional datacenters
- Integrate with existing edge infrastructure
### Phase 2: Expansion (Months 7-18)
- Expand to 6-8 core datacenters
- Deploy 30-40 regional datacenters
- Expand blockchain network
- Full integration testing
### Phase 3: Scale (Months 19-36)
- Complete 10-15 core datacenters
- Deploy 50-75 regional datacenters
- Full blockchain network deployment
- 325-region global coverage
## Monitoring and Management
### Datacenter Management
- **DCIM**: Data Center Infrastructure Management
- **Power Monitoring**: Real-time power usage and efficiency
- **Environmental Monitoring**: Temperature, humidity, airflow
- **Asset Management**: Hardware inventory and lifecycle
### Blockchain Monitoring
- **Node Health**: Validator and replica node status
- **Network Performance**: Latency, throughput, block times
- **Smart Contract Metrics**: Execution times, gas usage
- **Security Monitoring**: Anomaly detection, attack prevention
## Next Steps
1. **Site Selection**: Identify and secure datacenter locations
2. **Hardware Procurement**: Order and deploy infrastructure
3. **Blockchain Setup**: Deploy and configure blockchain network
4. **Integration**: Integrate with existing edge infrastructure
5. **Testing**: Comprehensive testing and validation
6. **Deployment**: Phased rollout following deployment plan

---

<!-- Document boundary: docs/deployment_plan.md (540 lines) begins below. -->
# Phoenix Sankofa Cloud: Deployment Plan
## Overview
This deployment plan outlines the phased rollout of Phoenix Sankofa Cloud across 325 regions, including edge sites, regional datacenters, core datacenters, and blockchain infrastructure. The deployment follows a structured approach to ensure reliability, security, and scalability.
## Deployment Phases
### Phase 1: Foundation (Months 1-6)
**Objective**: Establish core infrastructure and blockchain network foundation
#### Month 1-2: Core Datacenter Setup
1. **Site Selection and Preparation**
- Identify and secure 3 core datacenter locations (US, EU, APAC)
- Complete facility assessments and compliance reviews
- Procure power, cooling, and network connectivity
- Set up physical security and access controls
2. **Infrastructure Deployment**
- Deploy power and cooling systems
- Install network infrastructure (switches, routers, firewalls)
- Set up monitoring and management systems
- Configure backup power and generators
#### Month 3-4: Blockchain Network Initialization
1. **Blockchain Platform Setup**
- Choose blockchain platform (Hyperledger Besu recommended)
- Deploy 3 validator nodes (one per core datacenter)
- Configure consensus mechanism (PoA initial)
- Set up network connectivity between validators
2. **Smart Contract Development**
- Develop initial smart contracts:
- Resource provisioning contracts
- Identity management contracts
- Basic billing contracts
- Security audit of smart contracts
- Deploy smart contracts to blockchain network
3. **Blockchain Integration**
- Integrate blockchain with control plane
- Set up API gateway for blockchain access
- Configure monitoring and alerting
- Test blockchain network functionality
#### Month 5-6: Regional Datacenter Deployment
1. **Regional Site Selection**
- Identify 10 strategic regional datacenter locations
- Complete facility assessments
- Procure infrastructure components
2. **Regional Infrastructure Deployment**
- Deploy compute infrastructure (Kubernetes clusters)
- Deploy storage infrastructure
- Deploy blockchain read replica nodes
- Set up network connectivity to core datacenters
3. **Integration and Testing**
- Integrate regional datacenters with core
- Test blockchain read replica functionality
- Validate data replication and synchronization
- Performance testing and optimization
### Phase 2: Expansion (Months 7-18)
**Objective**: Expand to 6-8 core datacenters and 30-40 regional datacenters
#### Month 7-9: Core Expansion
1. **Additional Core Datacenters**
- Deploy 3-5 additional core datacenters
- Deploy blockchain validator nodes
- Expand blockchain network
- Integrate with existing infrastructure
2. **Blockchain Network Expansion**
- Add validators to blockchain network
- Deploy additional smart contracts:
- Supply chain provenance contracts
- Compliance and audit contracts
- SLA enforcement contracts
- Enhance blockchain monitoring and management
#### Month 10-12: Regional Expansion
1. **Regional Datacenter Deployment**
- Deploy 20 additional regional datacenters
- Deploy blockchain read replicas
- Set up regional services (API gateways, CDN)
- Integrate with core datacenters
2. **Edge Site Integration**
- Integrate existing edge sites with regional datacenters
- Deploy blockchain light clients to edge sites
- Set up edge-to-regional data flows
- Test end-to-end functionality
#### Month 13-18: Full Integration
1. **Service Integration**
- Integrate all services with blockchain
- Deploy comprehensive monitoring
- Set up automated operations
- Performance optimization
2. **Testing and Validation**
- Comprehensive system testing
- Security audits and penetration testing
- Performance benchmarking
- Disaster recovery testing
### Phase 3: Scale (Months 19-36)
**Objective**: Complete 325-region global deployment
#### Month 19-24: Global Expansion
1. **Core Datacenter Completion**
- Deploy remaining core datacenters (10-15 total)
- Complete blockchain validator network
- Global blockchain network deployment
2. **Regional Datacenter Completion**
- Deploy remaining regional datacenters (50-75 total)
- Deploy blockchain read replicas
- Complete regional service deployment
#### Month 25-30: Edge Site Expansion
1. **Edge Site Deployment**
- Deploy additional edge sites (250+ total)
- Deploy blockchain light clients
- Complete edge-to-regional integration
- Global edge network completion
2. **Global Integration**
- Complete global network integration
- Deploy global monitoring and management
- Set up global operations centers
- Complete compliance and governance setup
#### Month 31-36: Optimization and Maturity
1. **Performance Optimization**
- Optimize blockchain network performance
- Optimize data replication and synchronization
- Optimize network routing and latency
- Capacity planning and optimization
2. **Maturity and Operations**
- Establish mature operations procedures
- Complete documentation and training
- Set up 24/7 operations centers
- Continuous improvement and optimization
## Deployment Procedures
### Core Datacenter Deployment
#### Pre-Deployment Checklist
- [ ] Site selected and secured
- [ ] Power and cooling capacity verified
- [ ] Network connectivity established
- [ ] Physical security configured
- [ ] Compliance requirements met
- [ ] Hardware procured and delivered
- [ ] Deployment team assigned
#### Deployment Steps
1. **Physical Infrastructure**
```bash
# Install power and cooling systems
# Configure UPS and generators
# Set up network infrastructure
# Configure physical security
```
2. **Compute Infrastructure**
```bash
# Deploy blockchain validator nodes
# Deploy Kubernetes control plane
# Deploy database clusters
# Deploy message queue clusters
```
3. **Storage Infrastructure**
```bash
# Deploy distributed storage (Ceph)
# Deploy object storage (MinIO)
# Configure backup systems
# Set up replication
```
4. **Network Configuration**
```bash
# Configure inter-datacenter links
# Set up blockchain network overlay
# Configure firewalls and security
# Set up monitoring and management
```
5. **Blockchain Setup**
```bash
# Initialize blockchain node
# Join blockchain network
# Deploy smart contracts
# Configure monitoring
```
6. **Integration**
```bash
# Integrate with control plane
# Configure service discovery
# Set up monitoring and alerting
# Test end-to-end functionality
```
7. **Validation**
```bash
# Run validation tests
# Performance testing
# Security testing
# Disaster recovery testing
```
### Regional Datacenter Deployment
#### Pre-Deployment Checklist
- [ ] Site selected and secured
- [ ] Network connectivity to core datacenters established
- [ ] Hardware procured and delivered
- [ ] Deployment team assigned
#### Deployment Steps
1. **Physical Infrastructure**
```bash
# Install power and cooling
# Configure network infrastructure
# Set up physical security
```
2. **Compute Infrastructure**
```bash
# Deploy blockchain read replica nodes
# Deploy Kubernetes clusters
# Deploy regional services
```
3. **Storage Infrastructure**
```bash
# Deploy storage systems
# Configure replication from core
# Set up caching
```
4. **Network Configuration**
```bash
# Configure links to core datacenters
# Set up regional network
# Configure security
```
5. **Integration**
```bash
# Integrate with core datacenters
# Configure data synchronization
# Set up monitoring
# Test functionality
```
### Edge Site Deployment
**Note**: Edge sites follow existing edge implementation procedures. See edge implementation documentation.
**Additional Steps for Blockchain Integration**:
1. Deploy blockchain light client
2. Configure blockchain queries
3. Integrate with regional datacenters
4. Test blockchain functionality
## Blockchain Deployment
### Validator Node Deployment
#### Prerequisites
- Hardware Security Module (HSM) for key storage
- High-performance server (64-128GB RAM, NVMe storage)
- Network connectivity to other validators
- Access to blockchain network
#### Deployment Steps
1. **Node Setup**
```bash
# Install blockchain platform (Hyperledger Besu)
# Configure node settings
# Set up HSM for key management
# Generate validator keys
```
2. **Network Join**
```bash
# Configure network connectivity
# Join blockchain network
# Sync blockchain state
# Verify validator status
```
3. **Monitoring**
```bash
# Set up monitoring
# Configure alerting
# Test validator functionality
```
### Read Replica Deployment
#### Prerequisites
- Server with 32-64GB RAM
- Network connectivity to validators
- Access to blockchain network
#### Deployment Steps
1. **Node Setup**
```bash
# Install blockchain platform
# Configure as read replica
# Set up network connectivity
```
2. **Synchronization**
```bash
# Sync blockchain state
# Configure query optimization
# Set up caching
```
3. **Integration**
```bash
# Integrate with applications
# Configure API access
# Set up monitoring
```
### Smart Contract Deployment
#### Prerequisites
- Smart contracts developed and audited
- Access to blockchain network
- Deployment credentials
#### Deployment Steps
1. **Compilation**
```bash
# Compile smart contracts
# Run security checks
# Generate deployment artifacts
```
2. **Deployment**
```bash
# Deploy to test network
# Test functionality
# Deploy to production network
# Verify deployment
```
3. **Integration**
```bash
# Update application code
# Configure contract addresses
# Test integration
# Monitor contract usage
```
## Integration Procedures
### Control Plane Integration
1. **Kubernetes Integration**
- Deploy Kubernetes clusters
- Configure Crossplane for infrastructure provisioning
- Integrate with blockchain for resource tracking
- Set up ArgoCD for GitOps
2. **Identity Integration**
- Deploy Keycloak/OkraID
- Integrate with blockchain identity layer
- Configure identity federation
- Set up access control
3. **Monitoring Integration**
- Deploy Prometheus/Grafana
- Configure blockchain monitoring
- Set up alerting
- Create dashboards
### Network Integration
1. **Cloudflare Integration**
- Configure Cloudflare Zero Trust
- Set up Cloudflare Tunnels
- Configure access policies
- Test connectivity
2. **Inter-Datacenter Links**
- Provision network links
- Configure routing
- Set up redundancy
- Test connectivity
### Storage Integration
1. **Distributed Storage**
- Deploy Ceph clusters
- Configure replication
- Set up monitoring
- Test performance
2. **Object Storage**
- Deploy MinIO
- Configure S3 compatibility
- Set up replication
- Test functionality
## Validation and Testing
### Functional Testing
- [ ] All services operational
- [ ] Blockchain network functional
- [ ] Smart contracts working correctly
- [ ] Integration points validated
- [ ] End-to-end workflows tested
### Performance Testing
- [ ] Latency targets met
- [ ] Throughput targets met
- [ ] Scalability validated
- [ ] Resource utilization optimized
### Security Testing
- [ ] Security audits completed
- [ ] Penetration testing passed
- [ ] Access controls validated
- [ ] Encryption verified
- [ ] Compliance requirements met
### Disaster Recovery Testing
- [ ] Backup procedures tested
- [ ] Failover procedures tested
- [ ] Recovery time objectives met
- [ ] Recovery point objectives met
- [ ] Geographic redundancy validated
## Rollback Procedures
### Rollback Triggers
- Critical security vulnerabilities
- Performance degradation
- Data integrity issues
- Service unavailability
- Compliance violations
### Rollback Steps
1. **Immediate Actions**
- Isolate affected components
- Notify stakeholders
- Activate incident response
2. **Assessment**
- Assess impact and scope
- Determine rollback strategy
- Get approval for rollback
3. **Execution**
- Execute rollback procedures
- Restore previous state
- Verify functionality
- Monitor stability
4. **Post-Rollback**
- Root cause analysis
- Fix identified issues
- Update procedures
- Plan re-deployment
## Operations and Maintenance
### Daily Operations
- Monitor system health
- Review alerts and incidents
- Check backup status
- Verify blockchain network status
### Weekly Operations
- Review performance metrics
- Check capacity utilization
- Review security logs
- Update documentation
### Monthly Operations
- Capacity planning review
- Security audit review
- Compliance review
- Disaster recovery testing
- Performance optimization
### Quarterly Operations
- Comprehensive security audit
- Disaster recovery drill
- Capacity planning update
- Technology refresh planning
- Compliance certification review
## Success Criteria
### Phase 1 Success Criteria
- [ ] 3 core datacenters operational
- [ ] Blockchain network functional with 3 validators
- [ ] 10 regional datacenters operational
- [ ] Integration with existing edge sites
- [ ] Basic smart contracts deployed
- [ ] Monitoring and alerting operational
### Phase 2 Success Criteria
- [ ] 6-8 core datacenters operational
- [ ] Blockchain network expanded
- [ ] 30-40 regional datacenters operational
- [ ] Full smart contract suite deployed
- [ ] Comprehensive integration completed
- [ ] Security and compliance validated
### Phase 3 Success Criteria
- [ ] 10-15 core datacenters operational
- [ ] 50-75 regional datacenters operational
- [ ] 250+ edge sites operational
- [ ] 325-region global coverage
- [ ] Full blockchain network deployment
- [ ] Mature operations and procedures
## Related Documentation
- [System Architecture](./system_architecture.md) - Overall system architecture
- [Datacenter Architecture](./datacenter_architecture.md) - Datacenter specifications
- [Blockchain EEA Architecture](./blockchain_eea_architecture.md) - Blockchain architecture
- [Hardware BOM](./hardware_bom.md) - Hardware specifications

2
docs/diagrams.txt Normal file
View File

@@ -0,0 +1,2 @@
Block Diagram:
[Text-based representation]

415
docs/hardware_bom.md Normal file
View File

@@ -0,0 +1,415 @@
# Phoenix Sankofa Cloud: Hardware Bill of Materials (BOM)
## Overview
This document provides detailed hardware specifications for Phoenix Sankofa Cloud infrastructure across all tiers: Core Datacenters, Regional Datacenters, and Edge Sites. The BOM includes blockchain infrastructure, compute, storage, and networking components.
## Tier 1: Core Datacenters
### Blockchain Validator Nodes
**Quantity**: 3-5 nodes per core datacenter (10-15 datacenters = 30-75 nodes total)
**Specifications**:
- **CPU**: AMD EPYC 7763 (64 cores) or Intel Xeon Platinum 8380 (40 cores)
- **RAM**: 128GB DDR4 ECC (expandable to 256GB)
- **Storage**:
- 2x 4TB NVMe SSD (RAID 1) for blockchain state
- 1x 1TB NVMe SSD for OS and applications
- **Network**: 2x 25GbE network adapters
- **HSM**: Hardware Security Module for key storage (e.g., Thales Luna Network HSM)
- **Power**: 500W-750W per node
- **Form Factor**: 2U rack server
**Estimated Cost**: $15,000-$25,000 per node
### Kubernetes Control Plane Nodes
**Quantity**: 3 master + 5 worker nodes per core datacenter
**Master Node Specifications**:
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 64GB DDR4 ECC
- **Storage**: 2x 1TB NVMe SSD (RAID 1)
- **Network**: 2x 25GbE network adapters
- **Power**: 400W-600W per node
- **Form Factor**: 1U rack server
**Worker Node Specifications**:
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 128GB DDR4 ECC (expandable to 256GB)
- **Storage**: 2x 2TB NVMe SSD (RAID 1)
- **Network**: 2x 25GbE network adapters
- **GPU**: Optional NVIDIA A100 (40GB) for AI/ML workloads
- **Power**: 500W-750W per node
- **Form Factor**: 2U rack server
**Estimated Cost**: $8,000-$12,000 per master node, $12,000-$18,000 per worker node
### Database Cluster Nodes
**Quantity**: 3-node PostgreSQL cluster per core datacenter
**Specifications**:
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 256GB DDR4 ECC (expandable to 512GB)
- **Storage**:
- 2x 4TB NVMe SSD (RAID 1) for database
- 1x 1TB NVMe SSD for OS
- **Network**: 2x 25GbE network adapters
- **Power**: 600W-800W per node
- **Form Factor**: 2U rack server
**Estimated Cost**: $18,000-$25,000 per node
### Message Queue Cluster Nodes
**Quantity**: 3-node Kafka/Redpanda cluster per core datacenter
**Specifications**:
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 128GB DDR4 ECC
- **Storage**:
- 4x 4TB NVMe SSD (RAID 10) for message storage
- 1x 1TB NVMe SSD for OS
- **Network**: 2x 25GbE network adapters
- **Power**: 600W-800W per node
- **Form Factor**: 2U rack server
**Estimated Cost**: $15,000-$20,000 per node
### Storage Infrastructure
**Distributed Storage (Ceph)**:
- **Storage Nodes**: 6-12 nodes per core datacenter
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 128GB DDR4 ECC
- **Storage**:
- 12x 16TB HDD (SATA) for object storage
- 2x 2TB NVMe SSD for cache/metadata
- 1x 1TB NVMe SSD for OS
- **Network**: 2x 25GbE network adapters
- **Power**: 800W-1000W per node
- **Form Factor**: 4U rack server
**Estimated Cost**: $20,000-$30,000 per storage node
**Object Storage (MinIO)**:
- **Storage Nodes**: 4-8 nodes per core datacenter
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 128GB DDR4 ECC
- **Storage**:
- 12x 16TB HDD (SATA) for object storage
- 2x 2TB NVMe SSD for cache
- 1x 1TB NVMe SSD for OS
- **Network**: 2x 25GbE network adapters
- **Power**: 800W-1000W per node
- **Form Factor**: 4U rack server
**Estimated Cost**: $20,000-$30,000 per storage node
### Network Infrastructure
**Core Switches**:
- **Quantity**: 2-4 per core datacenter (redundancy)
- **Specifications**: 100GbE spine switches (48-64 ports)
- **Examples**: Arista 7280SR3, Cisco Nexus 9300, Juniper QFX5200
- **Estimated Cost**: $50,000-$100,000 per switch
**Leaf Switches**:
- **Quantity**: 8-16 per core datacenter
- **Specifications**: 25GbE/100GbE leaf switches (48 ports)
- **Examples**: Arista 7050SX3, Cisco Nexus 9300, Juniper QFX5100
- **Estimated Cost**: $15,000-$30,000 per switch
**Firewalls**:
- **Quantity**: 2-4 per core datacenter (redundancy)
- **Specifications**: Next-generation firewalls with 100Gbps throughput
- **Examples**: Palo Alto PA-7000, Fortinet FortiGate 6000, Check Point 16000
- **Estimated Cost**: $100,000-$200,000 per firewall
**Load Balancers**:
- **Quantity**: 2-4 per core datacenter (redundancy)
- **Specifications**: Application delivery controllers with 100Gbps throughput
- **Examples**: F5 BIG-IP, Citrix ADC, A10 Networks
- **Estimated Cost**: $50,000-$150,000 per load balancer
### Power and Cooling
**UPS Systems**:
- **Quantity**: 2-4 per core datacenter (redundancy)
- **Specifications**: 2-5MW UPS systems with N+1 redundancy
- **Estimated Cost**: $500,000-$1,000,000 per UPS system
**Generators**:
- **Quantity**: 2-4 per core datacenter (redundancy)
- **Specifications**: 2-5MW diesel generators with 72-hour fuel capacity
- **Estimated Cost**: $300,000-$600,000 per generator
**Cooling Systems**:
- **Quantity**: Multiple units per core datacenter
- **Specifications**: Precision cooling with hot aisle/cold aisle containment
- **Estimated Cost**: $200,000-$500,000 per cooling system
### Total Core Datacenter Hardware Cost
**Per Core Datacenter**: $5M-$10M (initial investment)
**Total (10-15 datacenters)**: $50M-$150M
## Tier 2: Regional Datacenters
### Blockchain Read Replica Nodes
**Quantity**: 2-3 nodes per regional datacenter (50-75 datacenters = 100-225 nodes total)
**Specifications**:
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 64GB DDR4 ECC
- **Storage**:
- 2x 2TB NVMe SSD (RAID 1) for blockchain state cache
- 1x 1TB NVMe SSD for OS
- **Network**: 2x 25GbE network adapters
- **Power**: 400W-600W per node
- **Form Factor**: 1U rack server
**Estimated Cost**: $8,000-$12,000 per node
### Kubernetes Cluster Nodes
**Quantity**: 3 master + 3 worker nodes per regional datacenter
**Master Node Specifications**:
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 64GB DDR4 ECC
- **Storage**: 2x 1TB NVMe SSD (RAID 1)
- **Network**: 2x 25GbE network adapters
- **Power**: 400W-600W per node
- **Form Factor**: 1U rack server
**Worker Node Specifications**:
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 128GB DDR4 ECC
- **Storage**: 2x 2TB NVMe SSD (RAID 1)
- **Network**: 2x 25GbE network adapters
- **Power**: 500W-750W per node
- **Form Factor**: 2U rack server
**Estimated Cost**: $8,000-$12,000 per master node, $12,000-$18,000 per worker node
### Regional Database Replicas
**Quantity**: 2-node PostgreSQL replica cluster per regional datacenter
**Specifications**:
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 128GB DDR4 ECC
- **Storage**:
- 2x 2TB NVMe SSD (RAID 1) for database
- 1x 1TB NVMe SSD for OS
- **Network**: 2x 25GbE network adapters
- **Power**: 500W-700W per node
- **Form Factor**: 2U rack server
**Estimated Cost**: $12,000-$18,000 per node
### Storage Infrastructure
**Primary Storage**:
- **Storage Nodes**: 2-4 nodes per regional datacenter
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 128GB DDR4 ECC
- **Storage**:
- 8x 8TB HDD (SATA) for primary storage
- 2x 2TB NVMe SSD for cache
- 1x 1TB NVMe SSD for OS
- **Network**: 2x 25GbE network adapters
- **Power**: 600W-800W per node
- **Form Factor**: 2U rack server
**Estimated Cost**: $12,000-$18,000 per storage node
**Object Storage**:
- **Storage Nodes**: 2-4 nodes per regional datacenter
- **CPU**: AMD EPYC 7543 (32 cores) or Intel Xeon Gold 6338 (32 cores)
- **RAM**: 128GB DDR4 ECC
- **Storage**:
- 8x 8TB HDD (SATA) for object storage
- 2x 2TB NVMe SSD for cache
- 1x 1TB NVMe SSD for OS
- **Network**: 2x 25GbE network adapters
- **Power**: 600W-800W per node
- **Form Factor**: 2U rack server
**Estimated Cost**: $12,000-$18,000 per storage node
### Network Infrastructure
**Switches**:
- **Quantity**: 4-8 per regional datacenter
- **Specifications**: 25GbE/100GbE switches (48 ports)
- **Estimated Cost**: $15,000-$30,000 per switch
**Firewalls**:
- **Quantity**: 2 per regional datacenter (redundancy)
- **Specifications**: Next-generation firewalls with 40Gbps throughput
- **Estimated Cost**: $30,000-$60,000 per firewall
**Load Balancers**:
- **Quantity**: 2 per regional datacenter (redundancy)
- **Specifications**: Application delivery controllers with 40Gbps throughput
- **Estimated Cost**: $20,000-$50,000 per load balancer
### Power and Cooling
**UPS Systems**:
- **Quantity**: 2 per regional datacenter (redundancy)
- **Specifications**: 500kW-2MW UPS systems with N+1 redundancy
- **Estimated Cost**: $100,000-$300,000 per UPS system
**Generators**:
- **Quantity**: 2 per regional datacenter (redundancy)
- **Specifications**: 500kW-2MW diesel generators with 48-hour fuel capacity
- **Estimated Cost**: $80,000-$200,000 per generator
**Cooling Systems**:
- **Quantity**: Multiple units per regional datacenter
- **Specifications**: Precision cooling systems
- **Estimated Cost**: $50,000-$150,000 per cooling system
### Total Regional Datacenter Hardware Cost
**Per Regional Datacenter**: $500K-$2M (initial investment)
**Total (50-75 datacenters)**: $25M-$150M
## Tier 3: Edge Sites
**Note**: Edge sites follow existing edge implementation hardware specifications. See edge implementation documentation for detailed BOM.
**Additional Hardware for Blockchain Integration**:
- **Blockchain Light Client**:
- Minimal hardware requirements
- Can run on existing edge infrastructure
- No additional hardware cost
## Network Connectivity
### Inter-Datacenter Links
**Core to Core**:
- **Bandwidth**: 100Gbps+ per link
- **Redundancy**: Multiple redundant paths
- **Type**: Dark fiber or high-bandwidth leased lines
- **Estimated Cost**: $50,000-$200,000 per link per year
**Core to Regional**:
- **Bandwidth**: 10-40Gbps per link
- **Redundancy**: Redundant paths
- **Type**: Leased lines or MPLS
- **Estimated Cost**: $20,000-$100,000 per link per year
**Regional to Edge**:
- **Bandwidth**: 1-10Gbps per link
- **Redundancy**: Redundant internet connections
- **Type**: Internet connectivity with Cloudflare Tunnels
- **Estimated Cost**: $5,000-$20,000 per link per year
## Software and Licensing
### Blockchain Platform
- **Hyperledger Besu**: Open source (no license cost)
- **Quorum**: Open source (no license cost)
- **Support**: Optional commercial support contracts
### Operating Systems
- **Linux**: Ubuntu Server LTS or RHEL (open source or subscription)
- **Estimated Cost**: $0-$500 per server per year
### Virtualization and Containerization
- **Kubernetes**: Open source (no license cost)
- **Proxmox VE**: Open source (optional support subscription)
- **Estimated Cost**: $0-$1,000 per cluster per year
### Database Software
- **PostgreSQL**: Open source (no license cost)
- **Support**: Optional commercial support contracts
### Monitoring and Management
- **Prometheus/Grafana**: Open source (no license cost)
- **Loki**: Open source (no license cost)
- **Commercial Monitoring**: Optional (e.g., Datadog, New Relic)
- **Estimated Cost**: $0-$50,000 per datacenter per year
### Security Software
- **HSM Software**: Included with HSM hardware
- **Firewall Software**: Included with firewall hardware
- **Security Tools**: Open source and commercial options
- **Estimated Cost**: $10,000-$50,000 per datacenter per year
## Total Project Hardware Cost Estimate
### Phase 1 (Foundation)
- **3 Core Datacenters**: $15M-$30M
- **10 Regional Datacenters**: $5M-$20M
- **Network Connectivity**: $2M-$5M
- **Total Phase 1**: $22M-$55M
### Phase 2 (Expansion)
- **Additional 3-5 Core Datacenters**: $15M-$50M
- **Additional 20 Regional Datacenters**: $10M-$40M
- **Network Connectivity**: $5M-$15M
- **Total Phase 2**: $30M-$105M
### Phase 3 (Scale)
- **Remaining Core Datacenters**: $20M-$70M
- **Remaining Regional Datacenters**: $10M-$90M
- **Network Connectivity**: $10M-$30M
- **Total Phase 3**: $40M-$190M
### Grand Total
**Total Hardware Investment**: $92M-$350M (over 36 months)
## Ongoing Operational Costs
### Power and Cooling
- **Core Datacenter**: $500K-$2M per year per datacenter
- **Regional Datacenter**: $100K-$500K per year per datacenter
- **Edge Site**: $10K-$50K per year per site
### Network Connectivity
- **Inter-Datacenter Links**: $50K-$200K per link per year
- **Internet Connectivity**: $10K-$50K per site per year
### Maintenance and Support
- **Hardware Maintenance**: 10-15% of hardware cost per year
- **Software Support**: $50K-$200K per datacenter per year
- **Professional Services**: $100K-$500K per year
### Total Annual Operational Costs
**Estimated**: $50M-$200M per year (at full scale)
## Procurement Considerations
### Vendor Selection
- **Diversity**: Use multiple vendors for redundancy
- **Support**: Ensure 24/7 support availability
- **Warranty**: Minimum 3-year warranty on all hardware
- **Compatibility**: Ensure hardware compatibility
### Lead Times
- **Standard Hardware**: 4-8 weeks
- **Custom Hardware**: 8-16 weeks
- **Network Equipment**: 6-12 weeks
- **Power and Cooling**: 12-24 weeks
### Deployment Schedule
- **Hardware Procurement**: Start 3-6 months before deployment
- **Staging and Testing**: 2-4 weeks per datacenter
- **Deployment**: 4-8 weeks per datacenter
- **Validation**: 1-2 weeks per datacenter
## Related Documentation
- [System Architecture](./system_architecture.md) - Overall system architecture
- [Datacenter Architecture](./datacenter_architecture.md) - Detailed datacenter specifications
- [Blockchain EEA Architecture](./blockchain_eea_architecture.md) - Blockchain architecture
- [Deployment Plan](./deployment_plan.md) - Deployment procedures

365
docs/system_architecture.md Normal file
View File

@@ -0,0 +1,365 @@
# Phoenix Sankofa Cloud: System Architecture
## Overview
Phoenix Sankofa Cloud is a multi-tier, globally distributed cloud infrastructure platform combining edge computing, regional datacenters, and core blockchain infrastructure. The architecture supports a 325-region global deployment with enterprise-grade blockchain capabilities for supply chain, identity, compliance, and resource management.
## Architecture Tiers
### Tier 1: Core Datacenters (Hub Sites)
**Purpose**: Primary infrastructure hubs for blockchain consensus, core services, and global coordination.
**Components**:
- Blockchain validator nodes (3-5 per datacenter)
- Kubernetes control plane clusters
- Core database clusters (PostgreSQL)
- Message queue clusters (Kafka/Redpanda)
- Object storage (MinIO/Ceph)
- Identity and access management (Keycloak/OkraID)
**Deployment**: 10-15 strategic locations globally
**See**: [Datacenter Architecture](./datacenter_architecture.md) for detailed specifications
### Tier 2: Regional Datacenters (Spoke Sites)
**Purpose**: Regional aggregation points, blockchain read replicas, and regional service delivery.
**Components**:
- Blockchain read replica nodes (2-3 per datacenter)
- Regional Kubernetes clusters
- Regional database replicas
- CDN edge nodes
- Regional API gateways
**Deployment**: 50-75 locations globally
**See**: [Datacenter Architecture](./datacenter_architecture.md) for detailed specifications
### Tier 3: Edge Sites (Edge Computing)
**Purpose**: Low-latency compute at the network edge.
**Components**:
- Proxmox VE clusters
- Light blockchain client nodes
- Edge compute nodes
- Local storage
- Cloudflare Tunnel agents
**Deployment**: 250+ locations globally
**See**: Existing edge implementation documentation in `docs/architecture/`
## Blockchain Architecture
### Enterprise Ethereum Alliance (EEA) Implementation
**Network Type**: Private, permissioned blockchain
**Consensus**: Proof of Authority (PoA) or Proof of Stake (PoS)
**Purpose**: Enterprise use cases (NOT cryptocurrencies)
**Key Components**:
- Validator nodes in Tier 1 core datacenters
- Read replica nodes in Tier 2 regional datacenters
- Light client nodes in Tier 3 edge sites
- Smart contracts for:
- Resource provisioning and tracking
- Supply chain provenance
- Identity and access management
- Billing and settlement
- Compliance and auditing
- SLA enforcement
**See**: [Blockchain EEA Architecture](./blockchain_eea_architecture.md) for detailed specifications
## System Components
### Control Plane
**Location**: Tier 1 and Tier 2 datacenters
**Components**:
- **Kubernetes**: Container orchestration
- **Crossplane**: Infrastructure as Code
- **ArgoCD**: GitOps deployment
- **Keycloak**: Identity and access management
- **Vault**: Secrets management
- **Prometheus/Grafana**: Monitoring and observability
- **Loki**: Log aggregation
**Integration**:
- All control plane operations recorded on blockchain
- Resource provisioning tracked via smart contracts
- Identity management integrated with blockchain identity layer
### Networking
**Global Network**:
- **Cloudflare Zero Trust**: Secure access layer
- **Cloudflare Tunnels**: Outbound-only connections
- **Inter-Datacenter Links**: 100Gbps+ between core datacenters
- **Regional Links**: 10-40Gbps to regional datacenters
- **Edge Connectivity**: High-speed internet with redundancy
**Blockchain Network**:
- **Private P2P Network**: Encrypted peer-to-peer connections
- **Network Overlay**: VPN or dedicated network segment
- **Consensus Communication**: Secure channels for validators
### Storage
**Tier 1 Core Datacenters**:
- Blockchain state storage: 50-100TB per datacenter
- Application data: 500TB-1PB per datacenter
- Object storage: 5-10PB per datacenter
- Backup storage: 2x primary capacity
**Tier 2 Regional Datacenters**:
- Primary storage: 100-500TB per datacenter
- Object storage: 200TB-1PB per datacenter
- Blockchain state cache: 10-20TB per datacenter
**Tier 3 Edge Sites**:
- Local storage: 40-200TB per site (as per edge implementation)
**Storage Technologies**:
- Ceph for distributed block/object storage
- ZFS for high-performance local storage
- MinIO for S3-compatible object storage
- LevelDB/RocksDB for blockchain state
### Compute
**Tier 1 Core Datacenters**:
- Blockchain validators: High-performance CPUs, 64-128GB RAM
- Kubernetes clusters: 3 master + 5 worker nodes minimum
- Database clusters: PostgreSQL with replication
- Message queues: Kafka/Redpanda clusters
**Tier 2 Regional Datacenters**:
- Blockchain read replicas: 32-64GB RAM
- Kubernetes clusters: 3 master + 3 worker nodes
- Regional services: API gateways, CDN nodes
**Tier 3 Edge Sites**:
- Proxmox clusters: As per edge implementation
- Edge compute: Low-latency processing
## Data Flow
### Resource Provisioning Flow
1. **User Request**: User requests resource via portal
2. **Control Plane**: Kubernetes/Crossplane processes request
3. **Blockchain Recording**: Resource provisioning recorded on blockchain via smart contract
4. **Infrastructure**: Resource provisioned in appropriate tier (edge/regional/core)
5. **Verification**: Multi-party verification via blockchain
6. **Monitoring**: Resource usage tracked and recorded
### Identity and Access Flow
1. **Identity Registration**: User identity registered on blockchain
2. **Authentication**: User authenticates via Keycloak/OkraID
3. **Blockchain Verification**: Identity verified via blockchain
4. **Access Grant**: Access granted based on verified identity
5. **Cross-Region**: Identity federation across regions via blockchain
### Supply Chain Flow
1. **Component Registration**: Hardware component registered on blockchain
2. **Transfer Tracking**: Each transfer recorded immutably
3. **Deployment Recording**: Component deployment recorded
4. **Compliance Verification**: Compliance checks verified via blockchain
5. **Audit Trail**: Complete history available for audit
### Billing and Settlement Flow
1. **Usage Tracking**: Resource usage tracked and recorded
2. **Blockchain Recording**: Usage data stored on blockchain
3. **Invoice Generation**: Smart contract generates invoice
4. **Multi-Party Verification**: Billing verified by multiple parties
5. **Automated Settlement**: Settlement executed via smart contract
## Security Architecture
### Physical Security
- Biometric access control
- 24/7 surveillance
- Fire suppression systems
- Environmental monitoring
- SOC 2, ISO 27001 compliance
### Network Security
- Network segmentation by tier
- TLS/SSL encryption for all connections
- Next-generation firewalls
- Multi-layer DDoS protection
- Zero Trust networking
### Blockchain Security
- Hardware Security Modules (HSMs) for validators
- Secure key management and rotation
- Permissioned blockchain with RBAC
- Smart contract security audits
- Emergency pause mechanisms
### Application Security
- OAuth2/JWT authentication
- Role-based access control (RBAC)
- Secrets management (Vault)
- Regular security audits
- Vulnerability scanning
## Integration Points
### Edge to Regional Integration
- Edge sites report metrics to regional datacenters
- Regional datacenters aggregate and process data
- Blockchain read replicas serve edge queries
### Regional to Core Integration
- Regional datacenters sync with core datacenters
- Core datacenters maintain blockchain consensus
- Global coordination via core datacenters
### Blockchain Integration
- All critical operations recorded on blockchain
- Smart contracts enforce policies and agreements
- Immutable audit trail for compliance
- Multi-party verification for transparency
### Control Plane Integration
- Kubernetes integrated with blockchain for resource tracking
- Crossplane provisions infrastructure with blockchain recording
- ArgoCD deployments tracked on blockchain
- Identity management integrated with blockchain identity layer
## Monitoring and Observability
### Infrastructure Monitoring
- **Prometheus**: Metrics collection
- **Grafana**: Visualization and dashboards
- **Loki**: Log aggregation
- **Alertmanager**: Alert routing and notification
### Blockchain Monitoring
- Validator node health and performance
- Network latency and throughput
- Smart contract execution metrics
- Security event monitoring
### Application Monitoring
- Application performance monitoring (APM)
- Error tracking and logging
- User experience monitoring
- Business metrics tracking
## Disaster Recovery
### Backup Strategy
- Blockchain state replicated across 3+ core datacenters
- Application data multi-region replication
- Continuous replication + daily snapshots
- 7-year retention for compliance
### Failover Procedures
- Automatic failover for regional datacenters
- Manual failover for core datacenters with governance approval
- RTO: < 4 hours for core, < 1 hour for regional
- RPO: < 15 minutes
### Geographic Redundancy
- Core datacenters: Minimum 3 active, 2 standby
- Regional datacenters: N+1 redundancy per region
- Edge sites: Automatic failover to adjacent sites
## Compliance and Governance
### Regulatory Compliance
- Data residency requirements
- GDPR, CCPA privacy compliance
- SOX financial compliance
- HIPAA, PCI-DSS where applicable
- Regional regulatory compliance
### Blockchain Governance
- Multi-party governance board
- Consensus-based decision making
- Formal upgrade process
- On-chain and off-chain dispute resolution
## Scalability
### Horizontal Scaling
- Add new datacenters as needed
- Scale blockchain network with new validators
- Expand edge sites for coverage
- Scale storage and compute independently
### Vertical Scaling
- Upgrade hardware in existing datacenters
- Increase capacity of existing infrastructure
- Optimize performance through tuning
### Auto-Scaling
- Kubernetes auto-scaling for workloads
- Storage auto-scaling based on demand
- Network bandwidth scaling
- Blockchain read replica scaling
## Performance Targets
### Latency
- Edge to user: < 10ms
- Regional to user: < 50ms
- Core to user: < 100ms
- Blockchain query: < 200ms (from read replica)
### Throughput
- Blockchain transactions: 1000+ TPS
- API requests: 100K+ RPS per region
- Storage IOPS: 100K+ per datacenter
- Network bandwidth: 100Gbps+ between core datacenters
### Availability
- Core datacenters: 99.99% uptime
- Regional datacenters: 99.9% uptime
- Edge sites: 99.5% uptime
- Blockchain network: 99.99% uptime
## Technology Stack Summary
### Blockchain
- **Platform**: Hyperledger Besu (recommended) or Quorum
- **Smart Contracts**: Solidity
- **Development**: Hardhat/Truffle
- **Integration**: Web3.js/Ethers.js
### Infrastructure
- **Orchestration**: Kubernetes
- **IaC**: Crossplane, Terraform
- **GitOps**: ArgoCD
- **Monitoring**: Prometheus, Grafana, Loki
### Storage
- **Distributed**: Ceph
- **Local**: ZFS
- **Object**: MinIO
- **Blockchain**: LevelDB/RocksDB
### Networking
- **Zero Trust**: Cloudflare
- **Tunnels**: Cloudflare Tunnels
- **Load Balancing**: Cloudflare + internal load balancers
### Identity
- **IAM**: Keycloak, OkraID
- **Blockchain Identity**: Smart contracts
- **SSI**: Self-sovereign identity support
## Related Documentation
- [Datacenter Architecture](./datacenter_architecture.md) - Detailed datacenter specifications
- [Blockchain EEA Architecture](./blockchain_eea_architecture.md) - Detailed blockchain architecture
- [Deployment Plan](./deployment_plan.md) - Deployment procedures
- [Hardware BOM](./hardware_bom.md) - Hardware specifications
- [Architecture Diagrams](../architecture/README.md) - Visual architecture diagrams

2
docs/treaty_framework.md Normal file
View File

@@ -0,0 +1,2 @@
# Treaty Framework
Legal structure...

138
gitops/README.md Normal file
View File

@@ -0,0 +1,138 @@
# GitOps Repository
This repository contains all infrastructure and application definitions managed via ArgoCD GitOps.
## Structure
```
gitops/
├── base/ # Base Kubernetes resources
│ ├── namespaces/ # Namespace definitions
│ ├── rbac/ # RBAC roles and bindings
│ └── kustomization.yaml # Base kustomization
├── overlays/ # Environment-specific overlays
│ ├── dev/ # Development environment
│ ├── staging/ # Staging environment
│ └── prod/ # Production environment
├── apps/ # ArgoCD Application definitions
│ ├── rancher/ # Rancher installation
│ ├── crossplane/ # Crossplane installation
│ ├── argocd/ # ArgoCD self-config
│ ├── vault/ # Vault installation
│ ├── monitoring/ # Prometheus, Grafana, Loki
│ └── portal/ # Portal deployment
├── infrastructure/ # Crossplane infrastructure definitions
│ ├── xrds/ # Composite Resource Definitions
│ ├── compositions/ # Composition templates
│ └── claims/ # Example claims
└── templates/ # Reusable templates
├── vm/ # VM templates
├── cluster/ # K8s cluster templates
└── network/ # Network templates
```
## Usage
### Bootstrap ArgoCD
1. Install ArgoCD on your cluster:
```bash
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
```
2. Apply the root ArgoCD Application:
```bash
kubectl apply -f apps/argocd/root-application.yaml
```
### Deploy to Specific Environment
```bash
# Development
kubectl apply -k overlays/dev/
# Production
kubectl apply -k overlays/prod/
```
## Environment Configuration
Each overlay directory contains:
- `kustomization.yaml` - Environment-specific patches
- `config/` - ConfigMaps and Secrets
- `patches/` - Strategic merge patches
## Infrastructure as Code
Crossplane XRDs and Compositions are defined in `infrastructure/`. These enable high-level resource provisioning through the portal.
### Example: Creating a VM
1. Create a claim:
```bash
kubectl apply -f infrastructure/claims/vm-claim-example.yaml
```
2. Monitor the resource:
```bash
kubectl get proxmoxvm web-server-01
kubectl describe proxmoxvm web-server-01
```
### Compositions
Compositions define reusable templates for common resources:
- `vm-ubuntu.yaml` - Ubuntu VM template
- Additional compositions can be added for other OS images
### Claims
Claims are user-facing resources that use compositions:
- `vm-claim-example.yaml` - Example VM claim
## GitOps Workflow
1. **Developer** creates/modifies resources in this repository
2. **Git** triggers ArgoCD sync (or manual sync)
3. **ArgoCD** applies changes to the cluster
4. **Crossplane** provisions infrastructure based on claims
5. **Monitoring** tracks resource status
## Best Practices
- Always use overlays for environment-specific configurations
- Keep base configurations generic and reusable
- Use Kustomize for configuration management
- Document all custom compositions
- Version control all infrastructure changes
## Troubleshooting
### ArgoCD Sync Issues
```bash
# Check ArgoCD application status
kubectl get applications -n argocd
# View sync logs
argocd app logs <app-name> --tail=100
```
### Crossplane Issues
```bash
# Check provider status
kubectl get providerconfig  # ProviderConfig is cluster-scoped; no namespace flag needed
# View resource events
kubectl describe proxmoxvm <vm-name>
```
## Related Documentation
- [ArgoCD Documentation](https://argo-cd.readthedocs.io/)
- [Crossplane Documentation](https://crossplane.io/docs/)
- [Kustomize Documentation](https://kustomize.io/)

View File

@@ -0,0 +1,26 @@
# ArgoCD "app of apps" root Application: points ArgoCD at gitops/apps so every
# child Application defined there is discovered and synced automatically.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: root-apps
  namespace: argocd
  # Ensures child resources are cleaned up when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    # NOTE(review): placeholder org — replace with the real GitOps repo URL.
    repoURL: https://github.com/yourorg/hybrid-cloud-gitops
    targetRevision: main
    path: gitops/apps
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  syncPolicy:
    automated:
      prune: true  # delete resources removed from Git
      selfHeal: true  # revert manual drift in the cluster
      allowEmpty: false  # refuse to sync an empty app (safety against repo wipe)
    syncOptions:
      - CreateNamespace=true
      - PrunePropagationPolicy=foreground
      - PruneLast=true

View File

@@ -0,0 +1,50 @@
# ArgoCD Application installing Crossplane from its official Helm repository,
# with preinstalled providers for Kubernetes, Helm, Azure, AWS, and GCP.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: crossplane
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://charts.crossplane.io/stable
    targetRevision: 1.14.0
    chart: crossplane
    helm:
      releaseName: crossplane
      # NOTE(review): the provider packages below use legacy "crossplane/…"
      # image names — confirm against the current registry naming
      # (xpkg.upbound.io/crossplane-contrib/…) and pinned versions.
      values: |
        args:
          - --enable-usages
        resourcesCrossplane:
          limits:
            cpu: 1000m
            memory: 1Gi
          requests:
            cpu: 100m
            memory: 128Mi
        resourcesRBACManager:
          limits:
            cpu: 500m
            memory: 512Mi
          requests:
            cpu: 50m
            memory: 64Mi
        provider:
          packages:
            - crossplane/provider-kubernetes:v0.12.0
            - crossplane/provider-helm:v0.15.0
            - crossplane/provider-azure:v0.20.0
            - crossplane/provider-aws:v0.40.0
            - crossplane/provider-gcp:v0.35.0
  destination:
    server: https://kubernetes.default.svc
    namespace: crossplane-system
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - PrunePropagationPolicy=foreground

View File

@@ -0,0 +1,75 @@
# ArgoCD Application installing the kube-prometheus-stack chart
# (Prometheus, Grafana, Alertmanager, Prometheus Operator).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: monitoring
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://prometheus-community.github.io/helm-charts
    targetRevision: 48.0.0
    chart: kube-prometheus-stack
    helm:
      releaseName: monitoring
      # SECURITY(review): grafana.adminPassword is a plaintext credential
      # committed to Git — move it to a Secret (grafana.admin.existingSecret)
      # or inject it via Vault before production use.
      values: |
        prometheus:
          prometheusSpec:
            retention: 30d
            storageSpec:
              volumeClaimTemplate:
                spec:
                  accessModes: ["ReadWriteOnce"]
                  resources:
                    requests:
                      storage: 500Gi
            resources:
              requests:
                cpu: 1000m
                memory: 4Gi
              limits:
                cpu: 2000m
                memory: 8Gi
        grafana:
          enabled: true
          adminPassword: changeme
          ingress:
            enabled: true
            ingressClassName: nginx
            annotations:
              cert-manager.io/cluster-issuer: letsencrypt-prod
            hosts:
              - grafana.yourdomain.com
          persistence:
            enabled: true
            size: 10Gi
        alertmanager:
          alertmanagerSpec:
            retention: 120h
            storage:
              volumeClaimTemplate:
                spec:
                  accessModes: ["ReadWriteOnce"]
                  resources:
                    requests:
                      storage: 50Gi
        prometheusOperator:
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 200m
              memory: 256Mi
  destination:
    server: https://kubernetes.default.svc
    namespace: monitoring
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - PrunePropagationPolicy=foreground

View File

@@ -0,0 +1,60 @@
# ArgoCD Application installing the loki-stack chart (Loki + Promtail);
# Grafana is disabled here because kube-prometheus-stack already provides it.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: loki
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://grafana.github.io/helm-charts
    # NOTE(review): confirm this chart version — published loki-stack releases
    # are typically in the 2.x series; 0.69.0 may not resolve.
    targetRevision: 0.69.0
    chart: loki-stack
    helm:
      releaseName: loki
      values: |
        loki:
          enabled: true
          persistence:
            enabled: true
            size: 100Gi
          config:
            schema_config:
              configs:
                - from: "2024-01-01"
                  store: boltdb-shipper
                  object_store: filesystem
                  schema: v11
                  index:
                    prefix: index_
                    period: 24h
            storage_config:
              boltdb_shipper:
                active_index_directory: /loki/boltdb-shipper-active
                cache_location: /loki/boltdb-shipper-cache
                shared_store: filesystem
              filesystem:
                directory: /loki/chunks
            limits_config:
              enforce_metric_name: false
              reject_old_samples: true
              reject_old_samples_max_age: 168h
        promtail:
          enabled: true
          config:
            clients:
              - url: http://loki:3100/loki/api/v1/push
        grafana:
          enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: monitoring
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - PrunePropagationPolicy=foreground

View File

@@ -0,0 +1,24 @@
# ArgoCD Application deploying the portal from the raw manifests directory
# in this same GitOps repository.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: portal
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    # NOTE(review): placeholder org — replace with the real GitOps repo URL.
    repoURL: https://github.com/yourorg/hybrid-cloud-gitops
    targetRevision: main
    path: gitops/apps/portal/manifests
  destination:
    server: https://kubernetes.default.svc
    namespace: portal
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - PrunePropagationPolicy=foreground

View File

@@ -0,0 +1,113 @@
# Portal web frontend: Deployment + Service + ConfigMap + Ingress.
# Configuration URLs are injected from the portal-config ConfigMap below.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: portal
  namespace: portal
  labels:
    app: portal
spec:
  replicas: 3
  selector:
    matchLabels:
      app: portal
  template:
    metadata:
      labels:
        app: portal
    spec:
      containers:
        - name: portal
          # NOTE(review): ":latest" defeats rollback and reproducible deploys —
          # pin an immutable tag or digest; "yourregistry" is a placeholder.
          image: yourregistry/portal:latest
          ports:
            - containerPort: 3000
              name: http
          env:
            - name: NODE_ENV
              value: "production"
            - name: KEYCLOAK_URL
              valueFrom:
                configMapKeyRef:
                  name: portal-config
                  key: keycloak-url
            - name: CROSSPLANE_API_URL
              valueFrom:
                configMapKeyRef:
                  name: portal-config
                  key: crossplane-api-url
            - name: ARGOCD_URL
              valueFrom:
                configMapKeyRef:
                  name: portal-config
                  key: argocd-url
          resources:
            requests:
              cpu: 200m
              memory: 512Mi
            limits:
              cpu: 1000m
              memory: 2Gi
          livenessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 10
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: portal
  namespace: portal
spec:
  selector:
    app: portal
  ports:
    - port: 80
      targetPort: 3000
      name: http
  type: ClusterIP
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: portal-config
  namespace: portal
data:
  # NOTE(review): "yourdomain.com" hosts are placeholders to be templated per
  # environment; in-cluster URLs use Kubernetes service DNS.
  keycloak-url: "https://keycloak.yourdomain.com"
  crossplane-api-url: "https://crossplane-api.crossplane-system.svc.cluster.local"
  argocd-url: "https://argocd.yourdomain.com"
  grafana-url: "https://grafana.yourdomain.com"
  loki-url: "https://loki.monitoring.svc.cluster.local:3100"
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: portal
  namespace: portal
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - portal.yourdomain.com
      secretName: portal-tls
  rules:
    - host: portal.yourdomain.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: portal
                port:
                  number: 80

View File

@@ -0,0 +1,44 @@
# ArgoCD Application installing Rancher Manager.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rancher
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    # NOTE(review): this installs straight from the rancher/rancher git tree;
    # the documented install path is the Helm repo
    # https://releases.rancher.com/server-charts/stable — confirm intent.
    repoURL: https://github.com/rancher/rancher
    targetRevision: release/v2.8
    path: charts/rancher
    helm:
      releaseName: rancher
      # NOTE(review): "ingress.tls: external" both conflicts with the
      # cert-manager issuer annotation and does not match the chart's
      # "ingress.tls.source" schema — verify against the Rancher chart values.
      values: |
        hostname: rancher.yourdomain.com
        replicas: 3
        ingress:
          enabled: true
          ingressClassName: nginx
          annotations:
            cert-manager.io/cluster-issuer: letsencrypt-prod
          tls: external
        rancherImage: rancher/rancher
        rancherImageTag: v2.8.0
        global:
          cattle:
            systemDefaultRegistry: ""
        extraEnv:
          - name: CATTLE_PROMETHEUS_METRICS
            value: "true"
  destination:
    server: https://kubernetes.default.svc
    # NOTE(review): Rancher is conventionally installed into "cattle-system" —
    # confirm the chart supports a custom namespace before using rancher-system.
    namespace: rancher-system
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - PrunePropagationPolicy=foreground
      - PruneLast=true

View File

@@ -0,0 +1,54 @@
# ArgoCD Application installing HashiCorp Vault in HA mode with integrated
# Raft storage, plus the agent injector and CSI provider.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: vault
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://helm.releases.hashicorp.com
    targetRevision: 0.24.0
    chart: vault
    helm:
      releaseName: vault
      values: |
        server:
          ha:
            enabled: true
            replicas: 3
            raft:
              enabled: true
              setNodeId: true
          image:
            repository: hashicorp/vault
            tag: "1.15.0"
          service:
            type: ClusterIP
          ingress:
            enabled: true
            ingressClassName: nginx
            annotations:
              cert-manager.io/cluster-issuer: letsencrypt-prod
            hosts:
              - host: vault.yourdomain.com
                paths:
                  - /
        ui:
          enabled: true
        injector:
          enabled: true
        csi:
          enabled: true
  destination:
    server: https://kubernetes.default.svc
    namespace: vault
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - PrunePropagationPolicy=foreground

View File

@@ -0,0 +1,12 @@
# Base kustomization: shared namespaces and RBAC for all environment overlays.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# NOTE(review): "namespace: default" rewrites the namespace of any namespaced
# resource in this base; the current resources (Namespaces, cluster-scoped
# RBAC) are unaffected — confirm intent before adding namespaced resources.
namespace: default
resources:
  - namespaces/
  - rbac/
# NOTE(review): "commonLabels" is deprecated in Kustomize v5 in favor of
# "labels", and it also mutates selectors — usually unwanted for Deployments.
commonLabels:
  app.kubernetes.io/managed-by: argocd
  app.kubernetes.io/part-of: phoenix-sankofa-cloud

View File

@@ -0,0 +1,56 @@
# Platform namespaces. Each carries a "name" label mirroring its metadata.name
# so NetworkPolicies and selectors can target namespaces by label.
apiVersion: v1
kind: Namespace
metadata:
  name: rancher-system
  labels:
    name: rancher-system
---
apiVersion: v1
kind: Namespace
metadata:
  name: crossplane-system
  labels:
    name: crossplane-system
---
apiVersion: v1
kind: Namespace
metadata:
  name: argocd
  labels:
    name: argocd
---
apiVersion: v1
kind: Namespace
metadata:
  name: vault
  labels:
    name: vault
---
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring
  labels:
    name: monitoring
---
apiVersion: v1
kind: Namespace
metadata:
  name: portal
  labels:
    name: portal
---
apiVersion: v1
kind: Namespace
metadata:
  name: keycloak
  labels:
    name: keycloak
---
apiVersion: v1
kind: Namespace
metadata:
  name: infrastructure
  labels:
    name: infrastructure

View File

@@ -0,0 +1,34 @@
# Grants the ArgoCD application controller broad permissions over core, apps,
# networking, and Crossplane package resources so it can sync this repository.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: hybrid-cloud-admin
rules:
  # SECURITY(review): wildcard resources/verbs across these API groups is
  # effectively cluster-admin for them — consider scoping to the specific
  # resources and verbs ArgoCD actually needs.
  - apiGroups: [""]
    resources: ["*"]
    verbs: ["*"]
  - apiGroups: ["apps"]
    resources: ["*"]
    verbs: ["*"]
  # NOTE(review): the "extensions" API group was removed in Kubernetes 1.16;
  # this rule is a no-op on modern clusters.
  - apiGroups: ["extensions"]
    resources: ["*"]
    verbs: ["*"]
  - apiGroups: ["networking.k8s.io"]
    resources: ["*"]
    verbs: ["*"]
  - apiGroups: ["pkg.crossplane.io"]
    resources: ["*"]
    verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: hybrid-cloud-admin-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: hybrid-cloud-admin
subjects:
  - kind: ServiceAccount
    name: argocd-application-controller
    namespace: argocd

View File

@@ -0,0 +1,26 @@
# Example ProxmoxVM managed resource for the custom Crossplane Proxmox
# provider: an Ubuntu cloud-image VM on node pve1 at site us-east-1.
apiVersion: proxmox.yourorg.io/v1alpha1
kind: ProxmoxVM
metadata:
  name: web-server-01
  namespace: default
spec:
  forProvider:
    node: pve1
    name: web-server-01
    cpu: 4
    memory: 8Gi
    disk: 100Gi
    storage: local-lvm
    network: vmbr0
    image: ubuntu-22.04-cloud
    site: us-east-1
    # cloud-init user data; the key below is a truncated placeholder.
    userData: |
      #cloud-config
      users:
        - name: admin
          ssh-authorized-keys:
            - ssh-rsa AAAAB3NzaC1yc2E...
    # NOTE(review): sshKeys duplicates the key already injected via the
    # cloud-config userData above — confirm the provider expects both fields.
    sshKeys:
      - ssh-rsa AAAAB3NzaC1yc2E...
  providerConfigRef:
    name: proxmox-provider-config

Some files were not shown because too many files have changed in this diff Show More