Items completed: 1. Merged PR #2 (starlette/httpx deps) 2. Fixed async race condition in multimodal_ui.py 3. Wired TTSAdapter (ElevenLabs, Azure) in API routes 4. Moved super_big_brain.py from core/ to reasoning/ (backward compat shim) 5. Added API authentication middleware (Bearer token via FUSIONAGI_API_KEY) 6. Added async adapter interface (acomplete/acomplete_structured) 7. Migrated FastAPI on_event to lifespan (fixes 20 deprecation warnings) 8. Liquid Neural Networks (continuous-time adaptive weights) 9. Quantum-AI Hybrid compute backend (simulator + optimization) 10. Embodied Intelligence / Robotics bridge (actuator + sensor protocols) 11. Consciousness Engineering (formal self-model with introspection) 12. ASI Scoring Rubric (C/A/L/N/R self-assessment harness) 13. GPU integration tests for TensorFlow backend 14. Multi-stage production Dockerfile 15. Gitea CI/CD pipeline (lint, test matrix, Docker build) 16. API rate limiting middleware (per-IP sliding window) 17. OpenAPI docs cleanup (auth + rate limiting descriptions) 18. Benchmarking suite (decomposition, multi-path, recomposition, e2e) 19. Plugin system (head registry for custom heads) 427 tests passing, 0 ruff errors, 0 mypy errors. Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
162 lines
5.3 KiB
Python
162 lines
5.3 KiB
Python
"""Tests for Super Big Brain: atomic decomposition, graph, recomposition."""
|
|
|
|
|
|
from fusionagi.core.super_big_brain import (
|
|
SuperBigBrainReasoningProvider,
|
|
run_super_big_brain,
|
|
)
|
|
from fusionagi.memory.scratchpad import LatentScratchpad
|
|
from fusionagi.memory.semantic_graph import SemanticGraphMemory
|
|
from fusionagi.memory.sharding import Shard, shard_context
|
|
from fusionagi.reasoning.context_loader import build_compact_prompt, load_context_for_reasoning
|
|
from fusionagi.reasoning.decomposition import decompose_recursive
|
|
from fusionagi.reasoning.meta_reasoning import challenge_assumptions, detect_contradictions
|
|
from fusionagi.reasoning.recomposition import RecomposedResponse
|
|
from fusionagi.schemas.atomic import (
|
|
AtomicSemanticUnit,
|
|
AtomicUnitType,
|
|
DecompositionResult,
|
|
RelationType,
|
|
SemanticRelation,
|
|
)
|
|
from fusionagi.schemas.head import HeadId
|
|
|
|
|
|
class TestAtomicSchema:
|
|
"""Test atomic semantic unit schemas."""
|
|
|
|
def test_atomic_unit_creation(self):
|
|
u = AtomicSemanticUnit(
|
|
unit_id="asu_1",
|
|
content="Test fact",
|
|
type=AtomicUnitType.FACT,
|
|
confidence=0.9,
|
|
)
|
|
assert u.unit_id == "asu_1"
|
|
assert u.content == "Test fact"
|
|
assert u.type == AtomicUnitType.FACT
|
|
assert u.confidence == 0.9
|
|
|
|
def test_decomposition_result(self):
|
|
u = AtomicSemanticUnit(unit_id="asu_1", content="Fact", type=AtomicUnitType.FACT)
|
|
r = SemanticRelation(from_id="root", to_id="asu_1", relation_type=RelationType.LOGICAL)
|
|
result = DecompositionResult(units=[u], relations=[r], depth=0)
|
|
assert len(result.units) == 1
|
|
assert len(result.relations) == 1
|
|
assert result.depth == 0
|
|
|
|
|
|
class TestDecomposition:
|
|
"""Test recursive decomposition."""
|
|
|
|
def test_decompose_simple(self):
|
|
result = decompose_recursive("What are the security risks? Must support 1M users.")
|
|
assert len(result.units) >= 1
|
|
assert result.depth >= 0
|
|
|
|
def test_decompose_empty(self):
|
|
result = decompose_recursive("")
|
|
assert len(result.units) == 0
|
|
|
|
def test_decompose_max_depth(self):
|
|
result = decompose_recursive("Question one? Question two? Question three?", max_depth=1)
|
|
assert result.depth <= 1
|
|
|
|
|
|
class TestSemanticGraph:
|
|
"""Test semantic graph memory."""
|
|
|
|
def test_add_and_query(self):
|
|
g = SemanticGraphMemory()
|
|
u = AtomicSemanticUnit(unit_id="asu_1", content="Fact", type=AtomicUnitType.FACT)
|
|
g.add_unit(u)
|
|
assert g.get_unit("asu_1") == u
|
|
assert len(g.query_units()) >= 1
|
|
|
|
def test_ingest_decomposition(self):
|
|
g = SemanticGraphMemory()
|
|
r = decompose_recursive("What is X? Constraint: must be fast.")
|
|
g.ingest_decomposition(r.units, r.relations)
|
|
assert len(g.query_units()) >= 1
|
|
|
|
|
|
class TestSharding:
|
|
"""Test context sharding."""
|
|
|
|
def test_shard_context(self):
|
|
r = decompose_recursive("Security risk? Cost constraint?")
|
|
shards = shard_context(r.units, max_cluster_size=5)
|
|
assert isinstance(shards, list)
|
|
assert all(isinstance(s, Shard) for s in shards)
|
|
|
|
|
|
class TestContextLoader:
|
|
"""Test retrieve-by-reference."""
|
|
|
|
def test_load_context(self):
|
|
r = decompose_recursive("Test prompt")
|
|
ctx = load_context_for_reasoning(r.units)
|
|
assert "unit_refs" in ctx
|
|
assert "unit_summaries" in ctx
|
|
|
|
def test_build_compact_prompt(self):
|
|
r = decompose_recursive("Short prompt")
|
|
prompt = build_compact_prompt(r.units, max_chars=1000)
|
|
assert isinstance(prompt, str)
|
|
|
|
|
|
class TestScratchpad:
|
|
"""Test latent scratchpad."""
|
|
|
|
def test_append_and_get(self):
|
|
s = LatentScratchpad()
|
|
s.append_hypothesis("H1")
|
|
s.append_discarded("D1")
|
|
state = s.get_intermediate()
|
|
assert len(state.hypotheses) == 1
|
|
assert len(state.discarded_paths) == 1
|
|
|
|
def test_clear(self):
|
|
s = LatentScratchpad()
|
|
s.append_hypothesis("H1")
|
|
s.clear()
|
|
state = s.get_intermediate()
|
|
assert len(state.hypotheses) == 0
|
|
|
|
|
|
class TestMetaReasoning:
|
|
"""Test meta-reasoning hooks."""
|
|
|
|
def test_challenge_assumptions(self):
|
|
u = AtomicSemanticUnit(
|
|
unit_id="asu_1",
|
|
content="Assume X is true",
|
|
type=AtomicUnitType.ASSUMPTION,
|
|
)
|
|
flagged = challenge_assumptions([u], "Conclusion based on X")
|
|
assert len(flagged) >= 0
|
|
|
|
def test_detect_contradictions(self):
|
|
u1 = AtomicSemanticUnit(unit_id="a", content="X is true", type=AtomicUnitType.FACT)
|
|
u2 = AtomicSemanticUnit(unit_id="b", content="X is not true", type=AtomicUnitType.FACT)
|
|
pairs = detect_contradictions([u1, u2])
|
|
assert isinstance(pairs, list)
|
|
|
|
|
|
class TestSuperBigBrain:
|
|
"""Test Super Big Brain orchestrator."""
|
|
|
|
def test_run_super_big_brain(self):
|
|
g = SemanticGraphMemory()
|
|
r = run_super_big_brain("What are the risks?", g)
|
|
assert isinstance(r, RecomposedResponse)
|
|
assert r.summary
|
|
assert 0 <= r.confidence <= 1
|
|
|
|
def test_super_big_brain_reasoning_provider(self):
|
|
p = SuperBigBrainReasoningProvider()
|
|
ho = p.produce_head_output(HeadId.LOGIC, "Analyze architecture")
|
|
assert ho.head_id == HeadId.LOGIC
|
|
assert ho.summary
|
|
assert len(ho.claims) >= 0
|