diff --git a/frontend/.storybook/visual-regression.ts b/frontend/.storybook/visual-regression.ts new file mode 100644 index 0000000..72d7798 --- /dev/null +++ b/frontend/.storybook/visual-regression.ts @@ -0,0 +1,29 @@ +/** + * Visual regression testing configuration for Storybook + Chromatic. + * + * To run: + * npx chromatic --project-token=YOUR_TOKEN + * + * Or using Playwright for local visual regression: + * npx playwright test --config=e2e/visual.config.ts + */ + +export const visualRegressionConfig = { + // Chromatic settings + chromatic: { + viewports: [375, 768, 1280], + delay: 300, + diffThreshold: 0.05, + }, + + // Snapshot targets (components to test) + components: [ + 'Components/Avatar', + 'Components/ChatMessage', + 'Components/Markdown', + 'Components/Skeleton', + 'Components/Toast', + 'Components/FilePreview', + 'Components/SearchFilter', + ], +} diff --git a/frontend/e2e/visual.config.ts b/frontend/e2e/visual.config.ts new file mode 100644 index 0000000..241c659 --- /dev/null +++ b/frontend/e2e/visual.config.ts @@ -0,0 +1,33 @@ +/** + * Visual regression testing with Playwright screenshots. 
+ * + * Run: npx playwright test --config=e2e/visual.config.ts + */ + +import { defineConfig, devices } from '@playwright/test' + +export default defineConfig({ + testDir: '.', + testMatch: 'visual.spec.ts', + timeout: 30000, + expect: { + toHaveScreenshot: { + maxDiffPixelRatio: 0.05, + threshold: 0.2, + }, + }, + use: { + baseURL: 'http://localhost:6006', // Storybook + screenshot: 'on', + }, + projects: [ + { name: 'desktop', use: { ...devices['Desktop Chrome'] } }, + { name: 'mobile', use: { ...devices['iPhone 13'] } }, + ], + webServer: { + command: 'npx storybook dev -p 6006 --no-open', + port: 6006, + reuseExistingServer: true, + timeout: 60000, + }, +}) diff --git a/frontend/e2e/visual.spec.ts b/frontend/e2e/visual.spec.ts new file mode 100644 index 0000000..aa827dd --- /dev/null +++ b/frontend/e2e/visual.spec.ts @@ -0,0 +1,31 @@ +/** + * Visual regression tests against Storybook stories. + * + * Run: npx playwright test --config=e2e/visual.config.ts + * First run creates baseline screenshots; subsequent runs compare. 
+ */ + +import { test, expect } from '@playwright/test' + +const STORIES = [ + { name: 'Avatar', path: '/iframe.html?id=components-avatar--default' }, + { name: 'ChatMessage-User', path: '/iframe.html?id=components-chatmessage--user-message' }, + { name: 'ChatMessage-Assistant', path: '/iframe.html?id=components-chatmessage--assistant-message' }, + { name: 'ChatMessage-Code', path: '/iframe.html?id=components-chatmessage--with-code-block' }, + { name: 'Markdown-Basic', path: '/iframe.html?id=components-markdown--basic-text' }, + { name: 'Markdown-Code', path: '/iframe.html?id=components-markdown--code-block' }, + { name: 'Skeleton-Single', path: '/iframe.html?id=components-skeleton--single-line' }, + { name: 'Skeleton-Multi', path: '/iframe.html?id=components-skeleton--multiple-lines' }, + { name: 'Toast-Info', path: '/iframe.html?id=components-toast--info' }, + { name: 'Toast-Error', path: '/iframe.html?id=components-toast--error' }, + { name: 'FilePreview-Text', path: '/iframe.html?id=components-filepreview--text-file' }, + { name: 'FilePreview-Image', path: '/iframe.html?id=components-filepreview--image-file' }, +] + +for (const story of STORIES) { + test(`Visual: ${story.name}`, async ({ page }) => { + await page.goto(story.path) + await page.waitForLoadState('networkidle') + await expect(page).toHaveScreenshot(`${story.name}.png`) + }) +} diff --git a/frontend/src/App.css b/frontend/src/App.css index a79fc78..5564f7a 100644 --- a/frontend/src/App.css +++ b/frontend/src/App.css @@ -867,3 +867,12 @@ body { .notification-item.unread { background: var(--bg-tertiary); } .notification-item .title { font-weight: 600; } .notification-item .body { color: var(--text-muted); margin-top: 0.15rem; } + +/* ========== Notification Dropdown ========== */ +.notification-dropdown { position: absolute; top: 100%; right: 0; width: 320px; max-height: 400px; background: var(--bg-secondary); border: 1px solid var(--border); border-radius: 8px; box-shadow: 0 4px 12px rgba(0,0,0,0.3); 
z-index: 100; overflow: hidden; } +@media (max-width: 768px) { .notification-dropdown { width: calc(100vw - 2rem); right: -1rem; } } + +/* ========== Drag & Drop ========== */ +.chat-layout.drag-over { outline: 2px dashed var(--accent); outline-offset: -4px; } +.drop-overlay { position: absolute; inset: 0; background: rgba(0,0,0,0.3); display: flex; align-items: center; justify-content: center; z-index: 50; pointer-events: none; border-radius: 8px; } +.drop-overlay span { background: var(--bg-secondary); padding: 1rem 2rem; border-radius: 8px; font-weight: 600; } diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index be8f96d..5adf50b 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -1,4 +1,4 @@ -import { useState, useCallback, useEffect, useRef, lazy, Suspense } from 'react' +import { useState, useCallback, useEffect, useRef, useReducer, lazy, Suspense } from 'react' import { AvatarGrid } from './components/AvatarGrid' import { ConsensusPanel } from './components/ConsensusPanel' import { VirtualMessages } from './components/VirtualMessages' @@ -7,13 +7,16 @@ import { ErrorBoundary } from './components/ErrorBoundary' import { MobileDrawer } from './components/MobileDrawer' import { SkeletonGrid } from './components/Skeleton' import { LoginPage } from './pages/LoginPage' -import { useTheme } from './hooks/useTheme' +import { RouterProvider, AppRoutes, usePageNavigation } from './Router' +import { StoreContext, appReducer, initialState, useAppState } from './hooks/useStore' import { useAuth } from './hooks/useAuth' import { useWebSocket } from './hooks/useWebSocket' import { useVoicePlayback } from './hooks/useVoicePlayback' import { useKeyboard } from './hooks/useKeyboard' import { useChatHistory } from './hooks/useChatHistory' -import type { FinalResponse, Page, ViewMode, WSEvent } from './types' +import { useNotifications } from './hooks/useNotifications' +import { t, getLocale } from './i18n' +import type { FinalResponse, ViewMode, WSEvent } 
from './types' import './App.css' const AdminPage = lazy(() => import('./pages/AdminPage').then((m) => ({ default: m.AdminPage }))) @@ -33,32 +36,39 @@ function PageSkeleton() { ) } -function App() { - const { theme, toggle: toggleTheme } = useTheme() +function AppInner() { + const { page, viewMode, theme, loading, networkError, sessionId, isMobile, prompt, + setPage, setViewMode, toggleTheme, setLoading, setError, setPrompt, dispatch } = useAppState() const { toast } = useToast() const { token, error: authError, login, logout, authHeaders, isAuthenticated } = useAuth() - const [page, setPage] = useState('chat') - const [sessionId, setSessionId] = useState(null) - const [prompt, setPrompt] = useState('') const { messages, addMessage, editMessage, deleteMessage, clearHistory, setMessages } = useChatHistory() - const [loading, setLoading] = useState(false) const [activeHeads, setActiveHeads] = useState([]) - const [viewMode, setViewMode] = useState('normal') const [lastResponse, setLastResponse] = useState(null) - const [networkError, setNetworkError] = useState(null) const [useStreaming, setUseStreaming] = useState(false) - const [isMobile, setIsMobile] = useState(false) const inputRef = useRef(null) const fileInputRef = useRef(null) const { speakingHead, headSummaries, onHeadSpeak, clearSpeaking } = useVoicePlayback() const ws = useWebSocket(sessionId) + const { notifications, unreadCount, handleWSEvent: handleNotifEvent, markAllRead } = useNotifications() + const [showNotifications, setShowNotifications] = useState(false) + + // Use router for page navigation + let routerNav: ReturnType | null = null + try { + routerNav = usePageNavigation() + } catch { + // Router not available (fallback mode) + } + + const currentPage = routerNav?.currentPage ?? page + const navigateTo = routerNav?.setPage ?? 
setPage useEffect(() => { - const check = () => setIsMobile(window.innerWidth <= 768) + const check = () => dispatch({ type: 'SET_MOBILE', isMobile: window.innerWidth <= 768 }) check() window.addEventListener('resize', check) return () => window.removeEventListener('resize', check) - }, []) + }, [dispatch]) useEffect(() => { if ('serviceWorker' in navigator) { @@ -69,10 +79,12 @@ function App() { useEffect(() => { if (ws.events.length === 0) return const last = ws.events[ws.events.length - 1] - handleWSEvent(last) + handleWSEventInternal(last) + // Also forward to notification handler + handleNotifEvent({ type: last.type, data: last as unknown as Record }) }, [ws.events]) - const handleWSEvent = (event: WSEvent) => { + const handleWSEventInternal = (event: WSEvent) => { switch (event.type) { case 'heads_running': setActiveHeads(HEAD_IDS.slice(0, 6)) @@ -121,14 +133,14 @@ function App() { if (!r.ok) throw new Error(`Session creation failed: ${r.status}`) const j = await parseJson(r) if (!j.session_id) throw new Error('No session_id in response') - setSessionId(j.session_id) - setNetworkError(null) + dispatch({ type: 'SET_SESSION', sessionId: j.session_id }) + setError(null) return j.session_id } catch (e) { - setNetworkError((e as Error).message) + setError((e as Error).message) return null } - }, [sessionId, parseJson, authHeaders]) + }, [sessionId, parseJson, authHeaders, dispatch, setError]) const handleSubmit = useCallback(async () => { if (!prompt.trim() || loading) return @@ -139,12 +151,28 @@ function App() { const currentPrompt = prompt setPrompt('') setLoading(true) - setNetworkError(null) + setError(null) clearSpeaking() setActiveHeads(HEAD_IDS.slice(0, 6)) if (useStreaming && ws.status === 'connected') { - ws.send({ prompt: currentPrompt }) + ws.sendPrompt(currentPrompt, { + onToken: (token) => { + // streaming token received + }, + onComplete: (response) => { + const data = response as FinalResponse + setLastResponse(data) + addMessage('assistant', 
data.final_answer, data) + setLoading(false) + setActiveHeads([]) + }, + onError: (error) => { + addMessage('assistant', `Error: ${error}`) + setLoading(false) + setActiveHeads([]) + }, + }) } else { try { const r = await fetch(`/v1/sessions/${sid}/prompt`, { @@ -163,23 +191,23 @@ function App() { contribs.forEach((c: { head_id: string; summary: string }) => onHeadSpeak(c.head_id, c.summary, null)) addMessage('assistant', data.final_answer, data) - setNetworkError(null) + setError(null) } catch (e) { const msg = (e as Error).message - setNetworkError(msg) + setError(msg) addMessage('assistant', `Error: ${msg}`) } finally { setLoading(false) setActiveHeads([]) } } - }, [prompt, loading, ensureSession, useStreaming, ws, authHeaders, parseJson, clearSpeaking, onHeadSpeak, addMessage]) + }, [prompt, loading, ensureSession, useStreaming, ws, authHeaders, parseJson, clearSpeaking, onHeadSpeak, addMessage, setPrompt, setLoading, setError, setViewMode]) const handleRetry = () => { const lastUser = [...messages].reverse().find((m) => m.role === 'user') if (lastUser) { setPrompt(lastUser.content) - setNetworkError(null) + setError(null) } } @@ -187,9 +215,9 @@ function App() { const msg = messages[index] if (msg?.role === 'user') { setPrompt(msg.content) - toast('Message loaded for editing', 'info') + toast(t('common.copy'), 'info') } - }, [messages, toast]) + }, [messages, toast, setPrompt]) const handleDeleteMessage = useCallback((index: number) => { deleteMessage(index) @@ -204,10 +232,34 @@ function App() { return } const text = await file.text() - setPrompt((p) => p + (p ? '\n' : '') + `[File: ${file.name}]\n${text.slice(0, 5000)}`) + setPrompt(prompt + (prompt ? 
'\n' : '') + `[File: ${file.name}]\n${text.slice(0, 5000)}`) toast(`Attached: ${file.name}`, 'success') e.target.value = '' - }, [toast]) + }, [toast, prompt, setPrompt]) + + const handleDragOver = useCallback((e: React.DragEvent) => { + e.preventDefault() + e.stopPropagation() + }, []) + + const handleDrop = useCallback(async (e: React.DragEvent) => { + e.preventDefault() + e.stopPropagation() + const file = e.dataTransfer.files?.[0] + if (!file) return + if (file.size > 10 * 1024 * 1024) { + toast('File too large (max 10MB)', 'error') + return + } + if (file.type.startsWith('image/')) { + setPrompt(prompt + (prompt ? '\n' : '') + `[Image: ${file.name}]`) + toast(`Image attached: ${file.name}`, 'success') + } else { + const text = await file.text() + setPrompt(prompt + (prompt ? '\n' : '') + `[File: ${file.name}]\n${text.slice(0, 5000)}`) + toast(`Attached: ${file.name}`, 'success') + } + }, [toast, prompt, setPrompt]) const syncPreferences = useCallback(async () => { try { @@ -225,32 +277,116 @@ function App() { useKeyboard({ onSend: handleSubmit, onSearch: () => inputRef.current?.focus(), - onDismiss: () => setNetworkError(null), + onDismiss: () => setError(null), onToggleTheme: toggleTheme, }) - if (!isAuthenticated && !token && token !== '') { - return - } + const chatPage = ( +
+
+ + {messages.length === 0 ? ( +
+
+

{t('chat.empty') === 'Start a conversation' ? 'Welcome to FusionAGI Dvadasa' : t('chat.empty')}

+

12 specialized heads analyze your query from every angle. Ask anything.

+
+ {['Explain quantum entanglement', 'Design a microservice architecture', 'Analyze the ethics of AI autonomy'].map((s) => ( + + ))} +
+
+
+ ) : ( + + )} +
+
+ setPrompt(e.target.value)} + onKeyDown={(e) => e.key === 'Enter' && !e.shiftKey && handleSubmit()} + placeholder={t('chat.placeholder')} + autoComplete="off" + disabled={loading} + aria-label="Message input" + /> + + + +
+
+ + {messages.length > 0 && ( + + )} + {sessionId && Session: {sessionId.slice(0, 8)}...} +
+
+
+ {!isMobile && } + {isMobile && lastResponse && ( + + + + )} +
+ ) return ( -
+
-

FusionAGI

+

{t('app.title')}

- {page === 'chat' && ( + {currentPage === 'chat' && (
- {(['normal', 'explain', 'developer'] as const).map((m) => ( + {(['normal', 'explain', 'developer'] as ViewMode[]).map((m) => (
)} +
+ + {showNotifications && ( +
+
+ Notifications + {unreadCount > 0 && } +
+
+ {notifications.length === 0 &&

No notifications

} + {notifications.slice(0, 20).map((n) => ( +
+
{n.title}
+
{n.body}
+
+ ))} +
+
+ )} +
- {token && } + {token && }
{networkError && (
{networkError} - - + +
)}
- {page === 'chat' && ( -
-
- - {messages.length === 0 ? ( -
-
-

Welcome to FusionAGI Dvādaśa

-

12 specialized heads analyze your query from every angle. Ask anything.

-
- {['Explain quantum entanglement', 'Design a microservice architecture', 'Analyze the ethics of AI autonomy'].map((s) => ( - - ))} -
-
-
- ) : ( - - )} -
-
- setPrompt(e.target.value)} - onKeyDown={(e) => e.key === 'Enter' && !e.shiftKey && handleSubmit()} - placeholder="Ask FusionAGI... (Ctrl+Enter to send, Ctrl+K to focus)" - autoComplete="off" - disabled={loading} - aria-label="Message input" - /> - - - -
-
- - {messages.length > 0 && ( - - )} - {sessionId && Session: {sessionId.slice(0, 8)}...} -
-
-
- {!isMobile && } - {isMobile && lastResponse && ( - - - - )} -
- )} - }> - - {page === 'admin' && } - {page === 'ethics' && } - {page === 'settings' && } - - + }> + + + + + } + ethicsPage={ + }> + + + + + } + settingsPage={ + }> + + + + + } + loginPage={} + isAuthenticated={isAuthenticated || !!token || token === ''} + />
) } +function App() { + const [state, dispatch] = useReducer(appReducer, initialState) + + return ( + + + + ) +} + function AppWithProviders() { return ( - - - + + + + + ) } diff --git a/frontend/src/pages/AdminPage.tsx b/frontend/src/pages/AdminPage.tsx index b4bcfa8..fc5b2ba 100644 --- a/frontend/src/pages/AdminPage.tsx +++ b/frontend/src/pages/AdminPage.tsx @@ -1,4 +1,6 @@ import { useState, useEffect, useCallback } from 'react' +import { MetricCard, Sparkline, BarChart } from '../components/SparklineChart' +import { t } from '../i18n' import type { SystemStatus, VoiceProfile } from '../types' function StatusCard({ label, value, unit, statusClass }: { @@ -15,6 +17,13 @@ function StatusCard({ label, value, unit, statusClass }: { ) } +interface StatusHistory { + cpu: number[] + memory: number[] + tasks: number[] + sessions: number[] +} + export function AdminPage({ authHeaders }: { authHeaders: () => Record }) { const [status, setStatus] = useState(null) const [voices, setVoices] = useState([]) @@ -23,11 +32,21 @@ export function AdminPage({ authHeaders }: { authHeaders: () => Record('overview') + const [history, setHistory] = useState({ cpu: [], memory: [], tasks: [], sessions: [] }) const fetchStatus = useCallback(async () => { try { const r = await fetch('/v1/admin/status', { headers: authHeaders() }) - if (r.ok) setStatus(await r.json()) + if (r.ok) { + const data = await r.json() + setStatus(data) + setHistory((h) => ({ + cpu: [...h.cpu, data.cpu_usage_percent ?? 0].slice(-20), + memory: [...h.memory, data.memory_usage_mb ?? 0].slice(-20), + tasks: [...h.tasks, data.active_tasks ?? 0].slice(-20), + sessions: [...h.sessions, data.active_sessions ?? 0].slice(-20), + })) + } } catch { /* offline */ } }, [authHeaders]) @@ -70,21 +89,24 @@ export function AdminPage({ authHeaders }: { authHeaders: () => RecordLoading admin dashboard...
+ const cpuTrend = history.cpu.length >= 2 ? (history.cpu[history.cpu.length - 1] > history.cpu[history.cpu.length - 2] ? 'up' : history.cpu[history.cpu.length - 1] < history.cpu[history.cpu.length - 2] ? 'down' : 'flat') as 'up' | 'down' | 'flat' : undefined + const memTrend = history.memory.length >= 2 ? (history.memory[history.memory.length - 1] > history.memory[history.memory.length - 2] ? 'up' : 'down') as 'up' | 'down' : undefined + + if (loading) return
{t('common.loading')}
return ( -
+
- {(['overview', 'voices', 'agents', 'governance'] as const).map((t) => ( + {(['overview', 'voices', 'agents', 'governance'] as const).map((tb) => ( ))}
@@ -93,22 +115,62 @@ export function AdminPage({ authHeaders }: { authHeaders: () => Record -

System Overview

-
+

{t('admin.status')}

+
+ + + + +
+
- - - -
+ {status && ( +
+

Agent Distribution

+ +
+ )}
)} {tab === 'voices' && (
-

Voice Library

+

{t('admin.voices')}

setNewVoiceName(e.target.value)} /> @@ -138,7 +200,7 @@ export function AdminPage({ authHeaders }: { authHeaders: () => Record -

Agent Configuration

+

{t('admin.agents')}

{['Planner', 'Reasoner', 'Executor', 'Critic', '12 Heads', 'Witness'].map((a) => (
@@ -152,7 +214,7 @@ export function AdminPage({ authHeaders }: { authHeaders: () => Record -

Governance Mode

+

{t('admin.governance')}

Current Mode: diff --git a/frontend/src/pages/EthicsPage.tsx b/frontend/src/pages/EthicsPage.tsx index 047aef0..7ae848f 100644 --- a/frontend/src/pages/EthicsPage.tsx +++ b/frontend/src/pages/EthicsPage.tsx @@ -1,4 +1,6 @@ import { useState, useEffect, useCallback } from 'react' +import { MetricCard, BarChart } from '../components/SparklineChart' +import { t } from '../i18n' import type { EthicalLesson, ConsequenceRecord, InsightRecord } from '../types' export function EthicsPage({ authHeaders }: { authHeaders: () => Record }) { @@ -26,28 +28,43 @@ export function EthicsPage({ authHeaders }: { authHeaders: () => Record setLoading(false)) }, [fetchData]) - if (loading) return
Loading ethics dashboard...
+ const positiveOutcomes = consequences.filter((c) => c.outcome_positive === true).length + const negativeOutcomes = consequences.filter((c) => c.outcome_positive === false).length + const pendingOutcomes = consequences.filter((c) => c.outcome_positive === null).length + const avgRisk = consequences.length > 0 ? consequences.reduce((s, c) => s + c.estimated_risk, 0) / consequences.length : 0 + const avgReward = consequences.length > 0 ? consequences.reduce((s, c) => s + c.estimated_reward, 0) / consequences.length : 0 + + const lessonWeights = lessons.map((l) => l.weight) + const insightConfidences = insights.map((i) => i.confidence) + + if (loading) return
{t('common.loading')}
return ( -
+
- {(['ethics', 'consequences', 'insights'] as const).map((t) => ( + {(['ethics', 'consequences', 'insights'] as const).map((tb) => ( ))}
{tab === 'ethics' && (
-

Adaptive Ethics — Learned Lessons

+

{t('ethics.lessons')}

+
+ + 0 ? (lessons.reduce((s, l) => s + l.weight, 0) / lessons.length).toFixed(2) : '0'} color="var(--color-warning, #ff9800)" /> + l.weight > 1).length} color="var(--color-success, #4caf50)" /> + l.weight < 0).length} color="var(--color-error, #f44336)" /> +
{lessons.length === 0 ? (

No ethical lessons recorded yet. The system learns from choices and their consequences.

) : ( @@ -76,7 +93,28 @@ export function EthicsPage({ authHeaders }: { authHeaders: () => Record -

Consequence Engine — Choice History

+

{t('ethics.consequences')}

+
+ + negativeOutcomes ? 'up' : 'down'} color="var(--color-success, #4caf50)" /> + + +
+ {consequences.length > 0 && ( +
+

Risk vs Reward

+ +
+ )} {consequences.length === 0 ? (

No consequences recorded yet. Every choice creates a consequence record.

) : ( @@ -117,7 +155,15 @@ export function EthicsPage({ authHeaders }: { authHeaders: () => Record -

InsightBus — Cross-Head Learning

+

{t('ethics.insights')}

+
+ + 0 ? `${(insights.reduce((s, i) => s + i.confidence, 0) / insights.length * 100).toFixed(0)}%` : 'N/A'} + color="var(--color-info, #2196f3)" + /> +
{insights.length === 0 ? (

No cross-head insights yet. Heads share observations through the InsightBus.

) : ( diff --git a/frontend/src/pages/SettingsPage.tsx b/frontend/src/pages/SettingsPage.tsx index a798dba..003ffa4 100644 --- a/frontend/src/pages/SettingsPage.tsx +++ b/frontend/src/pages/SettingsPage.tsx @@ -1,5 +1,7 @@ import { useState } from 'react' import { useToast } from '../components/Toast' +import { t, getLocale, setLocale, getAvailableLocales } from '../i18n' +import type { Locale } from '../i18n' import type { ConversationStyle, Theme } from '../types' interface SettingsPageProps { @@ -8,6 +10,15 @@ interface SettingsPageProps { authHeaders: () => Record } +const LOCALE_LABELS: Record = { + en: 'English', + es: 'Espanol', + fr: 'Francais', + de: 'Deutsch', + ja: 'Japanese', + zh: 'Chinese', +} + function Slider({ label, value, onChange, min = 0, max = 1, step = 0.1, id }: { label: string; value: number; onChange: (v: number) => void; min?: number; max?: number; step?: number; id: string }) { @@ -25,6 +36,7 @@ function Slider({ label, value, onChange, min = 0, max = 1, step = 0.1, id }: { export function SettingsPage({ theme, toggleTheme, authHeaders }: SettingsPageProps) { const { toast } = useToast() + const [locale, setLocaleState] = useState(getLocale()) const [style, setStyle] = useState({ formality: 'neutral', verbosity: 'balanced', @@ -34,6 +46,12 @@ export function SettingsPage({ theme, toggleTheme, authHeaders }: SettingsPagePr technical_depth: 0.5, }) + const handleLocaleChange = (newLocale: Locale) => { + setLocale(newLocale) + setLocaleState(newLocale) + toast(`Language set to ${LOCALE_LABELS[newLocale]}`, 'success') + } + const saveSettings = async () => { try { const r = await fetch('/v1/admin/conversation-style', { @@ -42,7 +60,7 @@ export function SettingsPage({ theme, toggleTheme, authHeaders }: SettingsPagePr body: JSON.stringify(style), }) if (r.ok) { - toast('Settings saved successfully', 'success') + toast(t('common.save') + ' — ' + t('settings.title'), 'success') } else { toast('Failed to save settings', 'error') } @@ -64,23 
+82,40 @@ export function SettingsPage({ theme, toggleTheme, authHeaders }: SettingsPagePr } return ( -
-

Settings

+
+

{t('settings.title')}

Appearance

- +
-
-

Conversation Style

+
+

Language

- + + +
+
+ +
+

{t('settings.conversation')}

+
+
- +
- setStyle({ ...style, empathy_level: v })} /> + setStyle({ ...style, empathy_level: v })} /> setStyle({ ...style, proactivity: v })} /> - setStyle({ ...style, humor_level: v })} /> - setStyle({ ...style, technical_depth: v })} /> + setStyle({ ...style, humor_level: v })} /> + setStyle({ ...style, technical_depth: v })} />
- +
diff --git a/fusionagi/api/app.py b/fusionagi/api/app.py index 66f02ca..8421634 100644 --- a/fusionagi/api/app.py +++ b/fusionagi/api/app.py @@ -39,14 +39,52 @@ def create_app( # --- Lifespan (replaces deprecated on_event) --- @asynccontextmanager async def lifespan(application: FastAPI): # type: ignore[type-arg] - """Startup / shutdown lifecycle.""" + """Startup / shutdown lifecycle with persistence and cache wiring.""" adapter_inner = getattr(application.state, "llm_adapter", None) + + # Wire persistence backend from env + backend = None + db_backend = os.environ.get("FUSIONAGI_DB_BACKEND", "memory") + if db_backend == "postgres": + dsn = os.environ.get("FUSIONAGI_POSTGRES_DSN", "postgresql://localhost/fusionagi") + try: + from fusionagi.core.postgres_backend import PostgresStateBackend + backend = PostgresStateBackend(dsn=dsn) + logger.info("Using PostgresStateBackend for persistence") + except Exception as e: + logger.warning("Postgres backend failed, falling back to memory", extra={"error": str(e)}) + elif db_backend == "sqlite": + db_path = os.environ.get("FUSIONAGI_SQLITE_PATH", "fusionagi_state.db") + try: + from fusionagi.core.sqlite_backend import SQLiteStateBackend + backend = SQLiteStateBackend(db_path=db_path) + logger.info("Using SQLiteStateBackend for persistence") + except Exception as e: + logger.warning("SQLite backend failed, falling back to memory", extra={"error": str(e)}) + + # Wire cache backend from env + redis_url = os.environ.get("FUSIONAGI_REDIS_URL") + if redis_url: + try: + from fusionagi.api.cache import RedisCacheBackend, ResponseCache + cache_backend = RedisCacheBackend(redis_url=redis_url) + application.state.response_cache = ResponseCache(backend=cache_backend) + logger.info("Using RedisCacheBackend for response cache") + except Exception as e: + logger.warning("Redis cache failed, using in-memory cache", extra={"error": str(e)}) + orch, bus = default_orchestrator(adapter_inner) + # Inject backend into orchestrator's state manager 
if available + if backend is not None: + orch._state_manager._backend = backend store = SessionStore() set_app_state(orch, bus, store) application.state._dvadasa_ready = True logger.info("FusionAGI Dvādaśa API started") yield + # Cleanup + if hasattr(backend, 'close'): + backend.close() logger.info("FusionAGI Dvādaśa API shutdown") app = FastAPI( diff --git a/fusionagi/core/__init__.py b/fusionagi/core/__init__.py index d0b3af2..bdc978d 100644 --- a/fusionagi/core/__init__.py +++ b/fusionagi/core/__init__.py @@ -14,6 +14,7 @@ from fusionagi.core.head_orchestrator import ( select_heads_for_complexity, ) from fusionagi.core.json_file_backend import JsonFileBackend +from fusionagi.core.memory_backend import InMemoryStateBackend from fusionagi.core.orchestrator import ( VALID_STATE_TRANSITIONS, AgentProtocol, @@ -21,7 +22,9 @@ from fusionagi.core.orchestrator import ( Orchestrator, ) from fusionagi.core.persistence import StateBackend +from fusionagi.core.postgres_backend import PostgresStateBackend from fusionagi.core.scheduler import FallbackMode, Scheduler, SchedulerMode +from fusionagi.core.sqlite_backend import SQLiteStateBackend from fusionagi.core.state_manager import StateManager from fusionagi.core.super_big_brain import ( SuperBigBrainConfig, @@ -35,6 +38,9 @@ __all__ = [ "Orchestrator", "StateBackend", "JsonFileBackend", + "InMemoryStateBackend", + "PostgresStateBackend", + "SQLiteStateBackend", "InvalidStateTransitionError", "VALID_STATE_TRANSITIONS", "AgentProtocol", diff --git a/fusionagi/core/postgres_backend.py b/fusionagi/core/postgres_backend.py new file mode 100644 index 0000000..99610a9 --- /dev/null +++ b/fusionagi/core/postgres_backend.py @@ -0,0 +1,245 @@ +"""Postgres-backed persistence for production deployments. + +Uses psycopg2 (or asyncpg when available) for connection pooling. +Falls back gracefully to in-memory if Postgres is unavailable. 
+""" + +from __future__ import annotations + +import json +import threading +from typing import Any + +from fusionagi._logger import logger +from fusionagi.core.persistence import StateBackend +from fusionagi.schemas.task import Task, TaskState + +_CREATE_SCHEMA = """ +CREATE TABLE IF NOT EXISTS tasks ( + task_id TEXT PRIMARY KEY, + data JSONB NOT NULL, + state TEXT NOT NULL DEFAULT 'pending', + created_at TIMESTAMPTZ DEFAULT NOW(), + updated_at TIMESTAMPTZ DEFAULT NOW() +); +CREATE TABLE IF NOT EXISTS traces ( + id SERIAL PRIMARY KEY, + task_id TEXT NOT NULL REFERENCES tasks(task_id) ON DELETE CASCADE, + entry JSONB NOT NULL, + created_at TIMESTAMPTZ DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS idx_traces_task_id ON traces(task_id); +CREATE INDEX IF NOT EXISTS idx_tasks_state ON tasks(state); +""" + + +class PostgresStateBackend(StateBackend): + """Postgres-backed implementation of StateBackend. + + Args: + dsn: PostgreSQL connection string (e.g., "postgresql://user:pass@host/db"). + pool_size: Connection pool size (min connections kept open). + max_overflow: Maximum extra connections beyond pool_size. 
+ """ + + def __init__( + self, + dsn: str = "postgresql://localhost/fusionagi", + pool_size: int = 5, + max_overflow: int = 10, + ) -> None: + self._dsn = dsn + self._pool_size = pool_size + self._max_overflow = max_overflow + self._lock = threading.Lock() + self._pool: Any = None + self._available = False + self._init_pool() + + def _init_pool(self) -> None: + """Initialize connection pool and create schema.""" + try: + from psycopg2 import pool as pg_pool + + self._pool = pg_pool.ThreadedConnectionPool( + minconn=1, + maxconn=self._pool_size + self._max_overflow, + dsn=self._dsn, + ) + conn = self._pool.getconn() + try: + with conn.cursor() as cur: + cur.execute(_CREATE_SCHEMA) + conn.commit() + finally: + self._pool.putconn(conn) + self._available = True + logger.info("PostgresStateBackend: connected", extra={"dsn": self._dsn.split("@")[-1]}) + except ImportError: + logger.warning("PostgresStateBackend: psycopg2 not installed, operating as no-op") + except Exception as e: + logger.warning("PostgresStateBackend: connection failed, operating as no-op", extra={"error": str(e)}) + + def _get_conn(self) -> Any: + if not self._available or self._pool is None: + return None + return self._pool.getconn() + + def _put_conn(self, conn: Any) -> None: + if self._pool is not None and conn is not None: + self._pool.putconn(conn) + + def get_task(self, task_id: str) -> Task | None: + """Load task by id from Postgres.""" + conn = self._get_conn() + if conn is None: + return None + try: + with conn.cursor() as cur: + cur.execute("SELECT data FROM tasks WHERE task_id = %s", (task_id,)) + row = cur.fetchone() + if row is None: + return None + return Task.model_validate(row[0] if isinstance(row[0], dict) else json.loads(row[0])) + finally: + self._put_conn(conn) + + def set_task(self, task: Task) -> None: + """Upsert task into Postgres.""" + if not self._available: + return + conn = self._get_conn() + if conn is None: + return + try: + with self._lock: + with conn.cursor() as cur: 
+ cur.execute( + """INSERT INTO tasks (task_id, data, state) VALUES (%s, %s, %s) + ON CONFLICT (task_id) DO UPDATE SET data = EXCLUDED.data, state = EXCLUDED.state, updated_at = NOW()""", + (task.task_id, task.model_dump_json(), task.state.value), + ) + conn.commit() + finally: + self._put_conn(conn) + + def get_task_state(self, task_id: str) -> TaskState | None: + """Return current task state.""" + conn = self._get_conn() + if conn is None: + return None + try: + with conn.cursor() as cur: + cur.execute("SELECT state FROM tasks WHERE task_id = %s", (task_id,)) + row = cur.fetchone() + return TaskState(row[0]) if row else None + finally: + self._put_conn(conn) + + def set_task_state(self, task_id: str, state: TaskState) -> None: + """Update task state in Postgres.""" + if not self._available: + return + conn = self._get_conn() + if conn is None: + return + try: + with self._lock: + with conn.cursor() as cur: + cur.execute( + "UPDATE tasks SET state = %s, updated_at = NOW() WHERE task_id = %s", + (state.value, task_id), + ) + conn.commit() + finally: + self._put_conn(conn) + + def append_trace(self, task_id: str, entry: dict[str, Any]) -> None: + """Append trace entry to Postgres.""" + if not self._available: + return + conn = self._get_conn() + if conn is None: + return + try: + with self._lock: + with conn.cursor() as cur: + cur.execute( + "INSERT INTO traces (task_id, entry) VALUES (%s, %s)", + (task_id, json.dumps(entry)), + ) + conn.commit() + finally: + self._put_conn(conn) + + def get_trace(self, task_id: str) -> list[dict[str, Any]]: + """Load trace entries from Postgres.""" + conn = self._get_conn() + if conn is None: + return [] + try: + with conn.cursor() as cur: + cur.execute( + "SELECT entry FROM traces WHERE task_id = %s ORDER BY id", + (task_id,), + ) + return [ + row[0] if isinstance(row[0], dict) else json.loads(row[0]) + for row in cur.fetchall() + ] + finally: + self._put_conn(conn) + + def list_tasks(self, state: TaskState | None = None, limit: 
int = 100) -> list[Task]: + """List tasks from Postgres.""" + conn = self._get_conn() + if conn is None: + return [] + try: + with conn.cursor() as cur: + if state is not None: + cur.execute("SELECT data FROM tasks WHERE state = %s ORDER BY updated_at DESC LIMIT %s", (state.value, limit)) + else: + cur.execute("SELECT data FROM tasks ORDER BY updated_at DESC LIMIT %s", (limit,)) + return [ + Task.model_validate(row[0] if isinstance(row[0], dict) else json.loads(row[0])) + for row in cur.fetchall() + ] + finally: + self._put_conn(conn) + + def delete_task(self, task_id: str) -> bool: + """Delete task and its traces from Postgres.""" + if not self._available: + return False + conn = self._get_conn() + if conn is None: + return False + try: + with self._lock: + with conn.cursor() as cur: + cur.execute("DELETE FROM tasks WHERE task_id = %s", (task_id,)) + deleted = cur.rowcount > 0 + conn.commit() + return deleted + finally: + self._put_conn(conn) + + def count_tasks(self) -> int: + """Count tasks in Postgres.""" + conn = self._get_conn() + if conn is None: + return 0 + try: + with conn.cursor() as cur: + cur.execute("SELECT COUNT(*) FROM tasks") + row = cur.fetchone() + return row[0] if row else 0 + finally: + self._put_conn(conn) + + def close(self) -> None: + """Close the connection pool.""" + if self._pool is not None: + self._pool.closeall() + self._available = False diff --git a/tests/load/k6_prompt.js b/tests/load/k6_prompt.js new file mode 100644 index 0000000..8d95a7d --- /dev/null +++ b/tests/load/k6_prompt.js @@ -0,0 +1,124 @@ +/** + * k6 load test for FusionAGI prompt endpoint. 
+ * + * Run: + * k6 run tests/load/k6_prompt.js + * + * Options: + * k6 run --vus 10 --duration 30s tests/load/k6_prompt.js + * k6 run --vus 50 --duration 2m tests/load/k6_prompt.js + * + * Requires: + * - FusionAGI API running at http://localhost:8000 + * - k6 installed (https://k6.io/docs/getting-started/installation/) + */ + +import http from 'k6/http' +import { check, sleep } from 'k6' +import { Rate, Trend } from 'k6/metrics' + +// Custom metrics +const errorRate = new Rate('errors') +const promptDuration = new Trend('prompt_duration', true) +const sessionDuration = new Trend('session_duration', true) + +// Test configuration +export const options = { + stages: [ + { duration: '10s', target: 5 }, // ramp up + { duration: '30s', target: 10 }, // steady + { duration: '10s', target: 20 }, // spike + { duration: '10s', target: 0 }, // ramp down + ], + thresholds: { + http_req_duration: ['p(95)<5000'], // 95% under 5s + errors: ['rate<0.1'], // <10% error rate + }, +} + +const BASE_URL = __ENV.API_URL || 'http://localhost:8000' +const API_KEY = __ENV.API_KEY || '' + +const PROMPTS = [ + 'Explain the concept of recursion', + 'What are the benefits of microservices?', + 'Design a rate limiter', + 'Compare SQL and NoSQL databases', + 'Explain the CAP theorem', + 'What is eventual consistency?', + 'How does garbage collection work?', + 'Explain WebSocket vs HTTP polling', +] + +function getHeaders() { + const headers = { 'Content-Type': 'application/json' } + if (API_KEY) { + headers['Authorization'] = `Bearer ${API_KEY}` + } + return headers +} + +export default function () { + const headers = getHeaders() + + // 1. 
Create session + const sessionStart = Date.now() + const sessionRes = http.post(`${BASE_URL}/v1/sessions`, null, { headers }) + sessionDuration.add(Date.now() - sessionStart) + + const sessionOk = check(sessionRes, { + 'session created': (r) => r.status === 200 || r.status === 201, + 'session has id': (r) => { + try { return !!JSON.parse(r.body).session_id } catch { return false } + }, + }) + + if (!sessionOk) { + errorRate.add(1) + sleep(1) + return + } + + const sessionId = JSON.parse(sessionRes.body).session_id + const prompt = PROMPTS[Math.floor(Math.random() * PROMPTS.length)] + + // 2. Send prompt + const promptStart = Date.now() + const promptRes = http.post( + `${BASE_URL}/v1/sessions/${sessionId}/prompt`, + JSON.stringify({ prompt }), + { headers, timeout: '30s' }, + ) + promptDuration.add(Date.now() - promptStart) + + const promptOk = check(promptRes, { + 'prompt success': (r) => r.status === 200, + 'has final_answer': (r) => { + try { return !!JSON.parse(r.body).final_answer } catch { return false } + }, + }) + + if (!promptOk) { + errorRate.add(1) + } + + // 3. Health check + const healthRes = http.get(`${BASE_URL}/health`, { headers }) + check(healthRes, { + 'health ok': (r) => r.status === 200, + }) + + sleep(0.5 + Math.random()) +} + +export function handleSummary(data) { + return { + stdout: JSON.stringify({ + total_requests: data.metrics.http_reqs.values.count, + avg_duration_ms: Math.round(data.metrics.http_req_duration.values.avg), + p95_duration_ms: Math.round(data.metrics.http_req_duration.values['p(95)']), + error_rate: data.metrics.errors ? data.metrics.errors.values.rate : 0, + avg_prompt_ms: data.metrics.prompt_duration ? 
Math.round(data.metrics.prompt_duration.values.avg) : 0, + }, null, 2), + } +} diff --git a/tests/test_app_wiring.py b/tests/test_app_wiring.py new file mode 100644 index 0000000..82f91a3 --- /dev/null +++ b/tests/test_app_wiring.py @@ -0,0 +1,34 @@ +"""Tests for app lifespan backend/cache wiring.""" + + +from fusionagi.api.app import create_app + + +def test_create_app_default(): + """App should create successfully with default (memory) backend.""" + app = create_app() + assert app is not None + assert app.title == "FusionAGI Dvādaśa API" + + +def test_create_app_with_sqlite_env(tmp_path, monkeypatch): + """App should accept FUSIONAGI_DB_BACKEND=sqlite env.""" + monkeypatch.setenv("FUSIONAGI_DB_BACKEND", "sqlite") + monkeypatch.setenv("FUSIONAGI_SQLITE_PATH", str(tmp_path / "test.db")) + app = create_app() + assert app is not None + + +def test_create_app_with_invalid_postgres(monkeypatch): + """App should gracefully fall back when Postgres DSN is invalid.""" + monkeypatch.setenv("FUSIONAGI_DB_BACKEND", "postgres") + monkeypatch.setenv("FUSIONAGI_POSTGRES_DSN", "postgresql://invalid:invalid@localhost:1/invalid") + app = create_app() + assert app is not None + + +def test_create_app_with_invalid_redis(monkeypatch): + """App should gracefully fall back when Redis URL is invalid.""" + monkeypatch.setenv("FUSIONAGI_REDIS_URL", "redis://localhost:1/0") + app = create_app() + assert app is not None diff --git a/tests/test_postgres_backend.py b/tests/test_postgres_backend.py new file mode 100644 index 0000000..de3becd --- /dev/null +++ b/tests/test_postgres_backend.py @@ -0,0 +1,30 @@ +"""Tests for PostgresStateBackend graceful degradation. + +When psycopg2 is unavailable, all operations are no-ops. 
+""" + +from fusionagi.core.postgres_backend import PostgresStateBackend +from fusionagi.schemas.task import Task, TaskState + + +def test_graceful_fallback_without_psycopg2(): + """PostgresStateBackend should silently degrade when Postgres is unreachable.""" + backend = PostgresStateBackend(dsn="postgresql://invalid:invalid@localhost:1/invalid") + assert backend._available is False + + # All reads return None/empty + assert backend.get_task("t1") is None + assert backend.get_task_state("t1") is None + assert backend.get_trace("t1") == [] + assert backend.list_tasks() == [] + assert backend.count_tasks() == 0 + + # All writes are no-ops + backend.set_task(Task(task_id="t1", goal="test")) + backend.set_task_state("t1", TaskState.ACTIVE) + backend.append_trace("t1", {"step": 1}) + assert backend.delete_task("t1") is False + + # Close is safe + backend.close() + assert backend._available is False diff --git a/uvicorn_config.py b/uvicorn_config.py new file mode 100644 index 0000000..e08306e --- /dev/null +++ b/uvicorn_config.py @@ -0,0 +1,27 @@ +"""Uvicorn production configuration for horizontal scaling. 
+
+Usage:
+    uvicorn fusionagi.api.app:create_app --factory --host 0.0.0.0 --port 8000
+
+Or with gunicorn (recommended for multi-process):
+    gunicorn -c gunicorn.conf.py fusionagi.api.app:create_app
+
+Environment variables:
+    FUSIONAGI_WORKERS: Number of worker processes (default: CPU count)
+    FUSIONAGI_HOST / FUSIONAGI_PORT: Bind host and port (default: 0.0.0.0:8000)
+    FUSIONAGI_DB_BACKEND: memory|sqlite|postgres (default: memory)
+    FUSIONAGI_REDIS_URL: Redis URL for shared cache (required for multi-worker)
+    FUSIONAGI_POSTGRES_DSN: Postgres DSN for shared persistence (required for multi-worker)
+"""
+
+import multiprocessing
+import os
+
+host = os.environ.get("FUSIONAGI_HOST", "0.0.0.0")
+port = int(os.environ.get("FUSIONAGI_PORT", "8000"))
+workers = int(os.environ.get("FUSIONAGI_WORKERS", multiprocessing.cpu_count()))
+log_level = os.environ.get("FUSIONAGI_LOG_LEVEL", "info").lower()
+access_log = True
+reload = os.environ.get("FUSIONAGI_RELOAD", "false").lower() in ("true", "1")
+timeout_keep_alive = 5
+limit_concurrency = int(os.environ.get("FUSIONAGI_CONCURRENCY", "100"))