Items completed: 1. Merged PR #2 (starlette/httpx deps) 2. Fixed async race condition in multimodal_ui.py 3. Wired TTSAdapter (ElevenLabs, Azure) in API routes 4. Moved super_big_brain.py from core/ to reasoning/ (backward compat shim) 5. Added API authentication middleware (Bearer token via FUSIONAGI_API_KEY) 6. Added async adapter interface (acomplete/acomplete_structured) 7. Migrated FastAPI on_event to lifespan (fixes 20 deprecation warnings) 8. Liquid Neural Networks (continuous-time adaptive weights) 9. Quantum-AI Hybrid compute backend (simulator + optimization) 10. Embodied Intelligence / Robotics bridge (actuator + sensor protocols) 11. Consciousness Engineering (formal self-model with introspection) 12. ASI Scoring Rubric (C/A/L/N/R self-assessment harness) 13. GPU integration tests for TensorFlow backend 14. Multi-stage production Dockerfile 15. Gitea CI/CD pipeline (lint, test matrix, Docker build) 16. API rate limiting middleware (per-IP sliding window) 17. OpenAPI docs cleanup (auth + rate limiting descriptions) 18. Benchmarking suite (decomposition, multi-path, recomposition, e2e) 19. Plugin system (head registry for custom heads) 427 tests passing, 0 ruff errors, 0 mypy errors. Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
132 lines
4.5 KiB
Python
132 lines
4.5 KiB
Python
"""Tests for fusionagi.gpu.training and self_improvement.gpu_training."""
|
|
|
|
import pytest
|
|
|
|
from fusionagi.gpu.backend import get_backend, reset_backend
|
|
from fusionagi.gpu.training import (
|
|
TrainingConfig,
|
|
optimize_heuristic_weights,
|
|
prepare_training_pairs,
|
|
run_gpu_training,
|
|
)
|
|
from fusionagi.self_improvement.gpu_training import (
|
|
can_gpu_train,
|
|
run_gpu_enhanced_training,
|
|
)
|
|
|
|
|
|
@pytest.fixture(autouse=True)
|
|
def _use_numpy():
|
|
reset_backend()
|
|
get_backend(force="numpy")
|
|
yield
|
|
reset_backend()
|
|
|
|
|
|
class FakeReflectiveMemory:
|
|
"""Fake reflective memory for testing."""
|
|
|
|
def __init__(self, lessons: list | None = None):
|
|
self._lessons = lessons or []
|
|
self._heuristics: dict = {}
|
|
|
|
def get_lessons(self, limit: int = 50) -> list:
|
|
return self._lessons[:limit]
|
|
|
|
def get_all_heuristics(self) -> dict:
|
|
return dict(self._heuristics)
|
|
|
|
def set_heuristic(self, key: str, value) -> None:
|
|
self._heuristics[key] = value
|
|
|
|
|
|
class TestPrepareTrainingPairs:
|
|
def test_empty(self):
|
|
be = get_backend()
|
|
inputs, targets = prepare_training_pairs([], backend=be)
|
|
assert be.to_numpy(inputs).shape[0] == 0
|
|
|
|
def test_basic(self):
|
|
be = get_backend()
|
|
lessons = [
|
|
{"task_id": "t1", "outcome": "success", "evaluation": {"score": 0.9}},
|
|
{"task_id": "t2", "outcome": "failed", "evaluation": {"score": 0.2}},
|
|
]
|
|
inputs, targets = prepare_training_pairs(lessons, backend=be)
|
|
inputs_np = be.to_numpy(inputs)
|
|
targets_np = be.to_numpy(targets)
|
|
assert inputs_np.shape[0] == 2
|
|
assert targets_np.shape == (2,)
|
|
assert abs(targets_np[0] - 0.9) < 1e-5
|
|
assert abs(targets_np[1] - 0.2) < 1e-5
|
|
|
|
|
|
class TestOptimizeHeuristicWeights:
|
|
def test_empty_data(self):
|
|
be = get_backend()
|
|
import numpy as np
|
|
inputs = be.from_numpy(np.zeros((0, 256), dtype=np.float32))
|
|
targets = be.from_numpy(np.zeros(0, dtype=np.float32))
|
|
result = optimize_heuristic_weights(inputs, targets, backend=be)
|
|
assert result.metadata.get("reason") == "no training data"
|
|
|
|
def test_basic_training(self):
|
|
be = get_backend()
|
|
import numpy as np
|
|
np.random.seed(42)
|
|
inputs = be.from_numpy(np.random.randn(10, 256).astype(np.float32))
|
|
targets = be.from_numpy(np.random.rand(10).astype(np.float32))
|
|
config = TrainingConfig(epochs=5, learning_rate=0.001)
|
|
result = optimize_heuristic_weights(inputs, targets, config=config, backend=be)
|
|
assert result.epochs_run == 5
|
|
assert result.weights_updated == 256
|
|
assert result.metadata["backend"] == "numpy"
|
|
|
|
def test_loss_decreases(self):
|
|
be = get_backend()
|
|
import numpy as np
|
|
np.random.seed(42)
|
|
inputs = be.from_numpy(np.random.randn(50, 256).astype(np.float32))
|
|
targets = be.from_numpy(np.random.rand(50).astype(np.float32))
|
|
config = TrainingConfig(epochs=20, learning_rate=0.01)
|
|
result = optimize_heuristic_weights(inputs, targets, config=config, backend=be)
|
|
# Loss should generally decrease with training
|
|
assert result.final_loss <= result.initial_loss + 0.5
|
|
|
|
|
|
class TestRunGPUTraining:
|
|
def test_no_lessons(self):
|
|
mem = FakeReflectiveMemory(lessons=[])
|
|
result = run_gpu_training(mem)
|
|
assert result.metadata.get("reason") == "no lessons available"
|
|
|
|
def test_with_lessons(self):
|
|
lessons = [
|
|
{"task_id": f"t{i}", "outcome": "ok", "evaluation": {"score": 0.5 + i * 0.1}}
|
|
for i in range(5)
|
|
]
|
|
mem = FakeReflectiveMemory(lessons=lessons)
|
|
config = TrainingConfig(epochs=3)
|
|
result = run_gpu_training(mem, config=config)
|
|
assert result.epochs_run == 3
|
|
|
|
|
|
class TestSelfImprovementGPUTraining:
|
|
def test_can_gpu_train(self):
|
|
assert can_gpu_train() is True
|
|
|
|
def test_run_enhanced_training_empty(self):
|
|
mem = FakeReflectiveMemory(lessons=[])
|
|
result = run_gpu_enhanced_training(mem, epochs=3)
|
|
assert result.get("gpu_accelerated") is True or "reason" in result
|
|
|
|
def test_run_enhanced_training_with_data(self):
|
|
lessons = [
|
|
{"task_id": "t1", "outcome": "ok", "evaluation": {"score": 0.8}},
|
|
{"task_id": "t2", "outcome": "fail", "evaluation": {"score": 0.3}},
|
|
]
|
|
mem = FakeReflectiveMemory(lessons=lessons)
|
|
result = run_gpu_enhanced_training(mem, epochs=3)
|
|
assert result.get("gpu_accelerated") is True
|
|
assert "gpu_training_last_loss" in mem.get_all_heuristics()
|