Items completed: 1. Merged PR #2 (starlette/httpx deps) 2. Fixed async race condition in multimodal_ui.py 3. Wired TTSAdapter (ElevenLabs, Azure) in API routes 4. Moved super_big_brain.py from core/ to reasoning/ (backward compat shim) 5. Added API authentication middleware (Bearer token via FUSIONAGI_API_KEY) 6. Added async adapter interface (acomplete/acomplete_structured) 7. Migrated FastAPI on_event to lifespan (fixes 20 deprecation warnings) 8. Liquid Neural Networks (continuous-time adaptive weights) 9. Quantum-AI Hybrid compute backend (simulator + optimization) 10. Embodied Intelligence / Robotics bridge (actuator + sensor protocols) 11. Consciousness Engineering (formal self-model with introspection) 12. ASI Scoring Rubric (C/A/L/N/R self-assessment harness) 13. GPU integration tests for TensorFlow backend 14. Multi-stage production Dockerfile 15. Gitea CI/CD pipeline (lint, test matrix, Docker build) 16. API rate limiting middleware (per-IP sliding window) 17. OpenAPI docs cleanup (auth + rate limiting descriptions) 18. Benchmarking suite (decomposition, multi-path, recomposition, e2e) 19. Plugin system (head registry for custom heads) 427 tests passing, 0 ruff errors, 0 mypy errors. Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
148 lines
4.7 KiB
Python
148 lines
4.7 KiB
Python
"""Integration tests for GPU/TensorFlow backend.
|
|
|
|
These tests validate the TensorFlow backend when available, and confirm
|
|
the NumPy fallback produces equivalent shapes/types otherwise.
|
|
|
|
Requires: pip install fusionagi[gpu]
|
|
Skipped gracefully when TensorFlow is not installed.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import numpy as np
|
|
import pytest
|
|
|
|
from fusionagi.gpu.backend import DeviceType, NumPyBackend, get_backend, reset_backend
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def _reset_backend():
    """Start every test from a pristine global backend, and clean up after."""
    reset_backend()  # discard any backend cached by a previous test
    yield
    reset_backend()  # leave no state behind for the next test
# ---------- NumPy fallback (always runs) ----------
|
|
|
|
class TestNumPyBackendShapes:
    """Shape and dtype sanity checks for the pure-NumPy fallback backend."""

    def test_embed_texts_shape(self) -> None:
        backend = NumPyBackend()
        vectors = backend.embed_texts(["hello world", "foo bar baz"])
        rows, dims = vectors.shape[0], vectors.shape[1]
        assert rows == 2
        assert dims > 0

    def test_cosine_similarity_matrix_shape(self) -> None:
        backend = NumPyBackend()
        left = backend.embed_texts(["a", "b", "c"])
        right = backend.embed_texts(["x", "y"])
        sim = backend.cosine_similarity_matrix(left, right)
        assert sim.shape == (3, 2)
        # Cosine values live in [-1, 1]; allow a little float slack.
        assert np.all(sim >= -1.0 - 1e-6) and np.all(sim <= 1.0 + 1e-6)

    def test_batch_score_shape(self) -> None:
        backend = NumPyBackend()
        hypotheses = backend.embed_texts(["hyp1", "hyp2", "hyp3"])
        reference = backend.embed_texts(["reference"])
        raw_scores = backend.batch_score(hypotheses, reference)
        assert backend.to_numpy(raw_scores).shape == (3,)

    def test_multi_head_attention_shape(self) -> None:
        backend = NumPyBackend()
        queries = backend.embed_texts(["query1", "query2"])
        keys = backend.embed_texts(["key1", "key2", "key3"])
        values = backend.embed_texts(["val1", "val2", "val3"])
        attended = backend.multi_head_attention(queries, keys, values, num_heads=4)
        # One output row per query.
        assert attended.shape[0] == 2

    def test_to_numpy_roundtrip(self) -> None:
        backend = NumPyBackend()
        original = np.array([1.0, 2.0, 3.0])
        restored = backend.to_numpy(backend.from_numpy(original))
        np.testing.assert_array_equal(original, restored)

    def test_device_summary(self) -> None:
        summary = NumPyBackend().device_summary()
        assert summary["backend"] == "numpy"
        assert summary["device"] == "cpu"
# ---------- TensorFlow backend (skipped if not installed) ----------
|
|
|
|
# Import TensorFlow if it is installed; otherwise leave ``tf`` as None.
# NOTE: a module-level ``pytest.importorskip`` here would skip EVERYTHING
# below it at collection time — including TestBackendAutoSelect, which is
# valid on the NumPy fallback. The TF-only tests already skip themselves
# via ``_get_tf_backend``, so a missing install must not skip the module.
try:
    import tensorflow as tf  # noqa: F401
except ImportError:  # exercised when fusionagi[gpu] extras are absent
    tf = None
class TestTensorFlowBackend:
    """Exercised only when the TensorFlow backend is actually selected."""

    def _get_tf_backend(self):
        """Return the active backend, skipping unless it is TensorFlow."""
        from fusionagi.gpu.backend import get_backend

        backend = get_backend()
        if backend.name != "tensorflow":
            pytest.skip("TensorFlow backend not selected (GPU may not be available)")
        return backend

    def test_embed_texts(self) -> None:
        backend = self._get_tf_backend()
        matrix = backend.to_numpy(backend.embed_texts(["test embedding"]))
        assert matrix.ndim == 2
        assert matrix.shape[0] == 1

    def test_cosine_similarity(self) -> None:
        backend = self._get_tf_backend()
        first = backend.embed_texts(["hello"])
        second = backend.embed_texts(["hello"])
        result = backend.to_numpy(backend.cosine_similarity_matrix(first, second))
        assert result.shape == (1, 1)
        # Identical inputs must be (near-)perfectly similar.
        assert result[0, 0] > 0.99

    def test_batch_score(self) -> None:
        backend = self._get_tf_backend()
        hypotheses = backend.embed_texts(["a", "b"])
        reference = backend.embed_texts(["a"])
        scored = backend.to_numpy(backend.batch_score(hypotheses, reference))
        assert scored.shape == (2,)

    def test_multi_head_attention(self) -> None:
        backend = self._get_tf_backend()
        queries = backend.embed_texts(["q1", "q2"])
        keys = backend.embed_texts(["k1", "k2"])
        values = backend.embed_texts(["v1", "v2"])
        attended = backend.to_numpy(
            backend.multi_head_attention(queries, keys, values, num_heads=2)
        )
        assert attended.shape[0] == 2

    def test_mixed_precision(self) -> None:
        # Only verifies that enabling mixed precision does not raise.
        self._get_tf_backend().enable_mixed_precision()

    def test_gpu_available(self) -> None:
        # Only verifies the probe runs and returns a real boolean.
        flag = self._get_tf_backend().gpu_available()
        assert isinstance(flag, bool)
# ---------- get_backend auto-selection ----------
|
|
|
|
class TestBackendAutoSelect:
    """get_backend() must hand back a usable backend on any environment."""

    def test_returns_valid_backend(self) -> None:
        backend = get_backend()
        assert backend.name in ("numpy", "tensorflow")
        assert backend.device in (DeviceType.CPU, DeviceType.GPU, DeviceType.TPU)

    def test_embed_texts_works(self) -> None:
        backend = get_backend()
        matrix = backend.to_numpy(backend.embed_texts(["test"]))
        # Embeddings are always 2-D: (num_texts, embedding_dim).
        assert matrix.ndim == 2