Some checks failed
- New fusionagi/gpu/ module with TensorBackend protocol abstraction
- TensorFlowBackend: GPU-accelerated ops with TensorCore mixed-precision
- NumPyBackend: CPU fallback (always available, no extra deps)
- Auto-selects best available backend at runtime
- GPU-accelerated operations:
  - Cosine similarity matrix (batched, XLA-compiled)
  - Multi-head attention for consensus scoring
  - Batch hypothesis scoring on GPU
  - Semantic similarity search (pairwise, nearest-neighbor, deduplication)
- New TensorFlowAdapter (fusionagi/adapters/):
  - LLMAdapter for local TF/Keras model inference
  - TensorCore mixed-precision support
  - GPU-accelerated embedding synthesis fallback
- Reasoning pipeline integration:
  - gpu_scoring.py: drop-in GPU replacement for multi_path scoring
  - Super Big Brain: use_gpu config flag, GPU scoring when available
- Memory integration:
  - gpu_search.py: GPU-accelerated semantic search for SemanticGraphMemory
- Self-improvement integration:
  - gpu_training.py: gradient-based heuristic weight optimization
  - Reflective memory training loop with loss tracking
- Dependencies: gpu extra (tensorflow>=2.16, numpy>=1.26)
- 64 new tests (276 total), all passing
- Architecture spec: docs/gpu_tensorcore_integration.md

Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
93 lines
2.8 KiB
Python
"""GPU-accelerated training integration for the self-improvement pipeline.

Wraps fusionagi.gpu.training to provide a self-improvement-aware training
interface that integrates with AutoTrainer and reflective memory.
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
from typing import Any, Protocol
|
|
|
|
from fusionagi._logger import logger
|
|
|
|
|
|
class ReflectiveMemoryLike(Protocol):
    """Structural interface for reflective-memory providers.

    Any object exposing these three methods can serve as both a source of
    training data (lessons) and a key/value heuristic store.
    """

    def get_lessons(self, limit: int = 50) -> list[dict[str, Any]]:
        """Return up to ``limit`` recorded lessons as dictionaries."""
        ...

    def get_all_heuristics(self) -> dict[str, Any]:
        """Return the full mapping of heuristic keys to values."""
        ...

    def set_heuristic(self, key: str, value: Any) -> None:
        """Store ``value`` under ``key`` in the heuristic store."""
        ...
|
|
|
|
|
|
def run_gpu_enhanced_training(
    reflective_memory: ReflectiveMemoryLike,
    epochs: int = 10,
    learning_rate: float = 0.01,
) -> dict[str, Any]:
    """Run GPU-accelerated training on reflective memory lessons.

    Optimizes heuristic scoring weights using gradient descent on GPU,
    then applies the learned improvements back to reflective memory.

    Args:
        reflective_memory: Source of training data and target for updates.
        epochs: Number of training epochs.
        learning_rate: Learning rate for optimization.

    Returns:
        Training result dict with loss history and update count. When the
        optional GPU dependencies are missing, a fallback dict with
        ``gpu_accelerated: False`` and a ``reason`` key is returned instead.
    """
    # Keep the except scope tight: only a failure to import the optional GPU
    # extra should trigger the fallback path. Previously the whole body sat
    # inside the try, so an ImportError raised *inside* run_gpu_training was
    # silently misreported as "GPU dependencies not installed".
    try:
        from fusionagi.gpu.training import (
            TrainingConfig,
            run_gpu_training,
        )
    except ImportError:
        logger.debug("GPU training not available; skipping")
        return {
            "gpu_accelerated": False,
            "reason": "GPU dependencies not installed",
        }

    config = TrainingConfig(
        learning_rate=learning_rate,
        epochs=epochs,
    )
    result = run_gpu_training(reflective_memory, config=config)

    # Persist training telemetry back into reflective memory only when the
    # run actually changed any weights.
    if result.weights_updated > 0:
        reflective_memory.set_heuristic(
            "gpu_training_last_loss", result.final_loss
        )
        reflective_memory.set_heuristic(
            "gpu_training_epochs", result.epochs_run
        )

    logger.info(
        "GPU-enhanced training complete",
        extra={
            "initial_loss": result.initial_loss,
            "final_loss": result.final_loss,
            "weights_updated": result.weights_updated,
        },
    )
    return {
        "initial_loss": result.initial_loss,
        "final_loss": result.final_loss,
        "epochs_run": result.epochs_run,
        "weights_updated": result.weights_updated,
        "gpu_accelerated": True,
        "metadata": result.metadata,
    }
|
|
|
|
|
|
def can_gpu_train() -> bool:
    """Report whether GPU training is usable.

    Returns ``True`` only when the GPU backend module imports and
    ``get_backend()`` resolves without raising ``ImportError``.
    """
    try:
        from fusionagi.gpu.backend import get_backend

        get_backend()
    except ImportError:
        return False
    return True
|