Some checks failed
- New fusionagi/gpu/ module with TensorBackend protocol abstraction - TensorFlowBackend: GPU-accelerated ops with TensorCore mixed-precision - NumPyBackend: CPU fallback (always available, no extra deps) - Auto-selects best available backend at runtime - GPU-accelerated operations: - Cosine similarity matrix (batched, XLA-compiled) - Multi-head attention for consensus scoring - Batch hypothesis scoring on GPU - Semantic similarity search (pairwise, nearest-neighbor, deduplication) - New TensorFlowAdapter (fusionagi/adapters/): - LLMAdapter for local TF/Keras model inference - TensorCore mixed-precision support - GPU-accelerated embedding synthesis fallback - Reasoning pipeline integration: - gpu_scoring.py: drop-in GPU replacement for multi_path scoring - Super Big Brain: use_gpu config flag, GPU scoring when available - Memory integration: - gpu_search.py: GPU-accelerated semantic search for SemanticGraphMemory - Self-improvement integration: - gpu_training.py: gradient-based heuristic weight optimization - Reflective memory training loop with loss tracking - Dependencies: gpu extra (tensorflow>=2.16, numpy>=1.26) - 64 new tests (276 total), all passing - Architecture spec: docs/gpu_tensorcore_integration.md Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
130 lines
3.7 KiB
Python
"""Tests for fusionagi.gpu backend, similarity, attention, scoring, and training."""

import numpy as np
import pytest

from fusionagi.gpu.backend import (
    DeviceType,
    NumPyBackend,
    TensorBackend,
    get_backend,
    reset_backend,
)
@pytest.fixture(autouse=True)
def _reset():
    """Clear the cached backend singleton before and after every test.

    Runs automatically (autouse) so no test can observe a backend left
    behind by a previous test.
    """
    reset_backend()
    try:
        yield
    finally:
        reset_backend()
class TestNumPyBackend:
    """Exercise the CPU fallback backend (NumPyBackend)."""

    def test_name(self):
        backend = NumPyBackend()
        assert backend.name == "numpy"

    def test_device(self):
        backend = NumPyBackend()
        assert backend.device == DeviceType.CPU

    def test_gpu_available(self):
        backend = NumPyBackend()
        assert backend.gpu_available() is False

    def test_embed_texts_shape(self):
        backend = NumPyBackend()
        vectors = backend.embed_texts(["hello world", "foo bar baz"])
        # Two inputs, 256-dimensional embedding each.
        assert vectors.shape == (2, 256)

    def test_embed_texts_normalized(self):
        backend = NumPyBackend()
        vectors = backend.embed_texts(["some text here"])
        # Embeddings are expected to be unit-length.
        assert abs(np.linalg.norm(vectors[0]) - 1.0) < 1e-5

    def test_embed_texts_deterministic(self):
        backend = NumPyBackend()
        first = backend.embed_texts(["hello world"])
        second = backend.embed_texts(["hello world"])
        np.testing.assert_array_almost_equal(first, second)

    def test_cosine_similarity_matrix_shape(self):
        backend = NumPyBackend()
        left = backend.embed_texts(["hello", "world"])
        right = backend.embed_texts(["foo", "bar", "baz"])
        # Pairwise similarities: 2 rows x 3 columns.
        assert backend.cosine_similarity_matrix(left, right).shape == (2, 3)

    def test_cosine_similarity_self(self):
        backend = NumPyBackend()
        vectors = backend.embed_texts(["test sentence"])
        sim = backend.cosine_similarity_matrix(vectors, vectors)
        # A vector compared with itself has similarity 1.
        assert abs(sim[0, 0] - 1.0) < 1e-5

    def test_batch_score_shape(self):
        backend = NumPyBackend()
        hypotheses = backend.embed_texts(["h1", "h2", "h3"])
        reference = backend.embed_texts(["reference"])[0]
        # One score per hypothesis.
        assert backend.batch_score(hypotheses, reference).shape == (3,)

    def test_batch_score_with_weights(self):
        backend = NumPyBackend()
        hypotheses = backend.embed_texts(["h1", "h2"])
        reference = backend.embed_texts(["reference"])[0]
        weights = np.ones(256, dtype=np.float32)
        assert backend.batch_score(hypotheses, reference, weights).shape == (2,)

    def test_multi_head_attention_shape(self):
        backend = NumPyBackend()
        queries = backend.embed_texts(["query1", "query2"])
        keys = backend.embed_texts(["key1", "key2", "key3"])
        values = backend.embed_texts(["val1", "val2", "val3"])
        out = backend.multi_head_attention(queries, keys, values, num_heads=4)
        # One output row per query.
        assert out.shape[0] == 2

    def test_to_numpy_roundtrip(self):
        backend = NumPyBackend()
        source = np.array([1.0, 2.0, 3.0])
        roundtripped = backend.to_numpy(backend.from_numpy(source))
        np.testing.assert_array_equal(source, roundtripped)

    def test_device_summary(self):
        summary = NumPyBackend().device_summary()
        assert summary["backend"] == "numpy"
        assert summary["device"] == "cpu"

    def test_enable_mixed_precision_noop(self):
        # Should be a harmless no-op on the CPU backend; just must not raise.
        NumPyBackend().enable_mixed_precision()
class TestGetBackend:
    """Behaviour of get_backend() auto-selection and its cached singleton."""

    def test_force_numpy(self):
        assert get_backend(force="numpy").name == "numpy"

    def test_default_returns_backend(self):
        selected = get_backend()
        # Whatever is auto-selected must satisfy the TensorBackend protocol.
        assert isinstance(selected, TensorBackend)

    def test_cached_singleton(self):
        first = get_backend(force="numpy")
        second = get_backend()
        # Second call returns the same cached instance.
        assert first is second

    def test_reset_clears_cache(self):
        before = get_backend(force="numpy")
        reset_backend()
        after = get_backend(force="numpy")
        # After a reset a fresh instance is constructed.
        assert before is not after