Items completed: 1. Merged PR #2 (starlette/httpx deps) 2. Fixed async race condition in multimodal_ui.py 3. Wired TTSAdapter (ElevenLabs, Azure) in API routes 4. Moved super_big_brain.py from core/ to reasoning/ (backward compat shim) 5. Added API authentication middleware (Bearer token via FUSIONAGI_API_KEY) 6. Added async adapter interface (acomplete/acomplete_structured) 7. Migrated FastAPI on_event to lifespan (fixes 20 deprecation warnings) 8. Liquid Neural Networks (continuous-time adaptive weights) 9. Quantum-AI Hybrid compute backend (simulator + optimization) 10. Embodied Intelligence / Robotics bridge (actuator + sensor protocols) 11. Consciousness Engineering (formal self-model with introspection) 12. ASI Scoring Rubric (C/A/L/N/R self-assessment harness) 13. GPU integration tests for TensorFlow backend 14. Multi-stage production Dockerfile 15. Gitea CI/CD pipeline (lint, test matrix, Docker build) 16. API rate limiting middleware (per-IP sliding window) 17. OpenAPI docs cleanup (auth + rate limiting descriptions) 18. Benchmarking suite (decomposition, multi-path, recomposition, e2e) 19. Plugin system (head registry for custom heads) 427 tests passing, 0 ruff errors, 0 mypy errors. Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
95 lines
3.5 KiB
Python
"""Tests for Consciousness Engineering — formal self-model."""
|
|
|
|
from __future__ import annotations
|
|
|
|
from fusionagi.reasoning.self_model import (
|
|
AttentionFocus,
|
|
CognitiveState,
|
|
SelfModel,
|
|
)
|
|
|
|
|
|
class TestSelfModel:
    """Unit tests for SelfModel: state transitions, capabilities, goals,
    emotional state, introspection reports, and log trimming."""

    def test_initial_state(self) -> None:
        """A freshly constructed model is idle and focused on the task."""
        model = SelfModel()
        assert model.cognitive_state == CognitiveState.IDLE
        assert model.attention_focus == AttentionFocus.TASK

    def test_set_state(self) -> None:
        """set_state updates both the cognitive state and the attention focus."""
        model = SelfModel()
        model.set_state(CognitiveState.REASONING, AttentionFocus.INTERNAL_STATE, "thinking hard")
        assert model.cognitive_state == CognitiveState.REASONING
        assert model.attention_focus == AttentionFocus.INTERNAL_STATE

    def test_register_and_update_capability(self) -> None:
        """Capability updates accumulate evidence visible via introspect()."""
        model = SelfModel()
        model.register_capability("logic", "formal reasoning", initial_confidence=0.6)
        # Two successes and one failure -> three evidence entries total.
        model.update_capability("logic", success=True)
        model.update_capability("logic", success=True)
        model.update_capability("logic", success=False)
        snapshot = model.introspect()
        assert "logic" in snapshot["capabilities"]
        assert snapshot["capabilities"]["logic"]["evidence_count"] == 3

    def test_goal_management(self) -> None:
        """Goals can be created and their progress tracked through introspect()."""
        model = SelfModel()
        model.set_goal("g1", "Learn from mistakes", priority=0.9)
        model.update_goal_progress("g1", 0.5)
        snapshot = model.introspect()
        assert "g1" in snapshot["goals"]
        assert snapshot["goals"]["g1"]["progress"] == 0.5

    def test_goal_alignment_check(self) -> None:
        """A goal flagged as value-misaligned produces a conflict warning."""
        model = SelfModel()
        model.set_goal("g1", "Test goal")
        # Reach into the private goal store to force a misalignment.
        model._goals["g1"].aligned_with_values = False
        alignment_warnings = model.check_goal_alignment()
        assert any("conflict" in w for w in alignment_warnings)

    def test_emotional_state_update(self) -> None:
        """A positive delta raises the tracked value above its midpoint."""
        model = SelfModel()
        model.update_emotional_state("confidence", 0.3)
        snapshot = model.introspect()
        assert snapshot["emotional_state"]["confidence"] > 0.5

    def test_emotional_state_clamped(self) -> None:
        """Emotional values are clamped into the [0.0, 1.0] range."""
        model = SelfModel()
        model.update_emotional_state("confidence", 10.0)
        assert model._emotional_state["confidence"] == 1.0
        model.update_emotional_state("confidence", -20.0)
        assert model._emotional_state["confidence"] == 0.0

    def test_explain_state(self) -> None:
        """explain_state mentions both the cognitive state and the focus."""
        model = SelfModel()
        model.set_state(CognitiveState.REASONING, AttentionFocus.TASK)
        explanation = model.explain_state()
        assert "reasoning" in explanation
        assert "task" in explanation

    def test_introspect_returns_all_fields(self) -> None:
        """The introspection report exposes every expected top-level key."""
        model = SelfModel()
        snapshot = model.introspect()
        expected_keys = (
            "cognitive_state",
            "attention_focus",
            "capabilities",
            "goals",
            "values",
            "emotional_state",
            "recent_thoughts",
        )
        for key in expected_keys:
            assert key in snapshot

    def test_introspection_log_trimming(self) -> None:
        """The introspection log stays bounded after many state changes."""
        model = SelfModel()
        # Shrink the private cap so trimming triggers within 200 iterations.
        model._max_log_size = 10
        for i in range(200):
            model.set_state(CognitiveState.REASONING, thought=f"thought_{i}")
        # After exceeding max_log_size, the log is trimmed to notable + last 100
        assert len(model._introspection_log) <= 120

    def test_get_summary(self) -> None:
        """get_summary reports counts for registered capabilities and goals."""
        model = SelfModel()
        model.register_capability("test", "test cap")
        model.set_goal("g1", "test goal")
        summary = model.get_summary()
        assert summary["capabilities_count"] == 1
        assert summary["goals_count"] == 1