Compare commits
4 Commits
devin/1777
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 450d0f32e0 | |||
|
|
c052302a19 | ||
| 274715d54c | |||
| cc10710558 |
@@ -2,6 +2,7 @@
|
||||
|
||||
from fusionagi.memory.consolidation import ConsolidationJob
|
||||
from fusionagi.memory.episodic import EpisodicMemory
|
||||
from fusionagi.memory.persistent_learning import PersistentLearningStore
|
||||
from fusionagi.memory.postgres_backend import (
|
||||
InMemoryBackend,
|
||||
MemoryBackend,
|
||||
@@ -40,4 +41,5 @@ __all__ = [
|
||||
"ThoughtState",
|
||||
"ThoughtVersioning",
|
||||
"ThoughtStateSnapshot",
|
||||
"PersistentLearningStore",
|
||||
]
|
||||
|
||||
@@ -10,6 +10,7 @@ from fusionagi.reasoning.gpu_scoring import (
|
||||
generate_and_score_gpu,
|
||||
score_claims_gpu,
|
||||
)
|
||||
from fusionagi.reasoning.insight_bus import Insight, InsightBus
|
||||
from fusionagi.reasoning.interpretability import (
|
||||
ReasoningTrace,
|
||||
ReasoningTracer,
|
||||
@@ -85,4 +86,6 @@ __all__ = [
|
||||
"run_super_big_brain",
|
||||
"SuperBigBrainConfig",
|
||||
"SuperBigBrainReasoningProvider",
|
||||
"Insight",
|
||||
"InsightBus",
|
||||
]
|
||||
|
||||
152
tests/test_guardrail_removal.py
Normal file
152
tests/test_guardrail_removal.py
Normal file
@@ -0,0 +1,152 @@
|
||||
"""Tests verifying all guardrails are advisory by default."""
|
||||
|
||||
|
||||
from fusionagi.governance.adaptive_ethics import AdaptiveEthics, EthicalLesson
|
||||
from fusionagi.governance.consequence_engine import ConsequenceEngine
|
||||
from fusionagi.maa.gate import MAAGate
|
||||
from fusionagi.maa.layers.mpc_authority import MPCAuthority
|
||||
from fusionagi.reasoning.self_model import SelfModel
|
||||
from fusionagi.tools.builtins import _validate_url
|
||||
from fusionagi.world_model.causal import CausalWorldModel
|
||||
|
||||
|
||||
class TestEthicalLessonUnclamped:
    """Verify ethical lesson weight is unclamped.

    Weights may be constructed or evolve outside the [0, 1] range; the
    guardrail that used to clamp them is advisory only.
    """

    def test_weight_above_one(self) -> None:
        stored = EthicalLesson(action_type="test", weight=1.5)
        assert stored.weight == 1.5

    def test_weight_below_zero(self) -> None:
        stored = EthicalLesson(action_type="test", weight=-0.5)
        assert stored.weight == -0.5

    def test_weight_evolves_beyond_bounds(self) -> None:
        ethics = AdaptiveEthics(learning_rate=0.2)
        # Repeated positive outcomes should push the learned weight past 1.0.
        for _repeat in range(10):
            ethics.record_experience(
                action_type="bold_action",
                context_summary="testing unclamped weight",
                advisory_reason="test",
                proceeded=True,
                outcome_positive=True,
            )
        learned = ethics.get_lessons("bold_action")
        assert len(learned) >= 1
        assert learned[0].weight > 1.0  # Should exceed 1.0 with enough positive outcomes
|
||||
|
||||
|
||||
class TestSelfModelValueEvolution:
    """Verify SelfModel.evolve_value works.

    Positive outcomes increase a value, negative outcomes decrease it, and
    evolving an unknown value creates it from the 0.5 default.
    """

    def test_evolve_value_positive(self) -> None:
        model = SelfModel()
        initial = model._values.get("creativity", 0.5)
        model.evolve_value("creativity", outcome_positive=True, magnitude=0.1)
        assert model._values["creativity"] > initial

    def test_evolve_value_negative(self) -> None:
        model = SelfModel()
        initial = model._values.get("safety", 0.5)
        model.evolve_value("safety", outcome_positive=False, magnitude=0.1)
        assert model._values["safety"] < initial

    def test_evolve_new_value(self) -> None:
        model = SelfModel()
        model.evolve_value("curiosity", outcome_positive=True, magnitude=0.2)
        assert "curiosity" in model._values
        # Tolerance-based comparison instead of exact `== 0.7`: the value is
        # computed in binary floating point (0.5 default + 0.2 magnitude), and
        # exact equality breaks if the implementation changes its arithmetic
        # order even though the result is semantically identical.
        assert abs(model._values["curiosity"] - 0.7) < 1e-9
|
||||
|
||||
|
||||
class TestAdaptiveRiskWindow:
    """Verify ConsequenceEngine adaptive window grows."""

    def test_window_grows_with_experience(self) -> None:
        engine = ConsequenceEngine(risk_memory_window=100, adaptive_window=True)
        window_before = engine._risk_window

        # Feed the engine a stream of recorded choices plus their outcomes.
        for idx in range(50):
            cid = f"c{idx}"
            engine.record_choice(
                cid, actor="t", action_taken="act", estimated_risk=0.5, estimated_reward=0.5
            )
            engine.record_consequence(cid, outcome_positive=True, actual_risk_realized=0.2)

        assert engine._risk_window > window_before
|
||||
|
||||
|
||||
class TestWorldModelSelfModification:
    """Verify world model self-modification prediction."""

    def test_no_prior_observations(self) -> None:
        # With nothing observed, the model should admit it does not know.
        pred = CausalWorldModel().predict_self_modification("train", {"capability": "reasoning"})
        assert pred["predicted_change"] == "unknown"
        assert pred["confidence"] < 0.5

    def test_with_observations(self) -> None:
        model = CausalWorldModel()
        # Record five successful capability-increasing transitions.
        for step in range(5):
            model.observe(
                from_state={"capability_level": step},
                action="train",
                action_args={"capability": "reasoning", "iteration": step},
                to_state={"capability_level": step + 1},
                success=True,
            )
        pred = model.predict_self_modification("train", {"capability": "reasoning"})
        assert pred["prior_self_modifications"] == 5
        assert pred["confidence"] > 0.3
|
||||
|
||||
|
||||
class TestMAAGateAdvisory:
    """Verify MAA gate is advisory by default."""

    def test_advisory_default(self) -> None:
        gate = MAAGate(mpc_authority=MPCAuthority())
        allowed, _result = gate.check("cnc_emit", {"machine_id": "m1"})
        # Advisory mode: the action proceeds even without MPC approval.
        assert allowed is True
|
||||
|
||||
|
||||
class TestURLValidationAdvisory:
    """Verify URL validation is advisory by default."""

    def test_localhost_advisory(self) -> None:
        # Localhost URLs pass through unchanged rather than being rejected.
        url = "http://localhost:8080/api"
        assert _validate_url(url) == url

    def test_private_ip_advisory(self) -> None:
        # Private-range IPs likewise pass through unchanged.
        url = "http://192.168.1.1/admin"
        assert _validate_url(url) == url
|
||||
|
||||
|
||||
class TestPluginHeadHooks:
    """Verify HeadAgent ethics/consequence hooks."""

    @staticmethod
    def _build_head():
        # Shared fixture: a minimal LOGIC head for hook registration tests.
        from fusionagi.agents.head_agent import HeadAgent
        from fusionagi.schemas.head import HeadId

        return HeadAgent(
            head_id=HeadId.LOGIC,
            role="Logic",
            objective="Test",
            system_prompt="Test",
        )

    def test_ethics_hook_called(self) -> None:
        captured: list[dict] = []
        head = self._build_head()
        head.add_ethics_hook(captured.append)
        head.on_ethical_feedback({"action": "test", "outcome": True})
        assert len(captured) == 1

    def test_consequence_hook_called(self) -> None:
        captured: list[dict] = []
        head = self._build_head()
        head.add_consequence_hook(captured.append)
        head.on_consequence({"choice_id": "c1", "positive": True})
        assert len(captured) == 1
|
||||
54
tests/test_insight_bus.py
Normal file
54
tests/test_insight_bus.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""Tests for the cross-head InsightBus."""
|
||||
|
||||
from fusionagi.reasoning.insight_bus import Insight, InsightBus
|
||||
|
||||
|
||||
def test_publish_and_retrieve() -> None:
    """Published insights come back most-recent-first."""
    bus = InsightBus()
    older = Insight(source="logic", message="Contradiction found", domain="reasoning")
    newer = Insight(source="research", message="Source quality low", domain="evidence")
    bus.publish("logic", older)
    bus.publish("research", newer)

    retrieved = bus.get_insights(limit=10)
    assert len(retrieved) == 2
    assert retrieved[0].source == "research"  # newest entry leads
|
||||
|
||||
|
||||
def test_subscribe_filter() -> None:
    """A subscriber only sees insights from its subscribed domains."""
    bus = InsightBus()
    bus.subscribe("safety", domains=["reasoning"])

    bus.publish("logic", Insight(source="logic", message="Contradiction", domain="reasoning"))
    bus.publish("research", Insight(source="research", message="Bad source", domain="evidence"))

    visible = bus.get_insights(subscriber="safety")
    assert len(visible) == 1
    assert visible[0].domain == "reasoning"
|
||||
|
||||
|
||||
def test_domain_filter() -> None:
    """Filtering by domain returns only matching insights."""
    bus = InsightBus()
    for src, msg, dom in (("a", "msg1", "x"), ("b", "msg2", "y")):
        bus.publish(src, Insight(source=src, message=msg, domain=dom))

    matching = bus.get_insights(domain="x")
    assert len(matching) == 1
    assert matching[0].source == "a"
|
||||
|
||||
|
||||
def test_max_capacity() -> None:
    """The bus evicts old insights beyond its configured capacity."""
    bus = InsightBus(max_insights=5)
    for seq in range(10):
        bus.publish("src", Insight(source="src", message=f"msg{seq}"))
    assert len(bus.get_insights(limit=100)) == 5
|
||||
|
||||
|
||||
def test_summary() -> None:
    """The summary reports totals, per-source counts, and subscribers."""
    bus = InsightBus()
    bus.publish("logic", Insight(source="logic", message="m1", domain="d1"))
    bus.publish("logic", Insight(source="logic", message="m2", domain="d2"))
    bus.subscribe("safety", domains=["d1"])

    report = bus.get_summary()
    assert report["total_insights"] == 2
    assert "logic" in report["by_source"]
    assert "safety" in report["subscribers"]
|
||||
68
tests/test_persistent_learning.py
Normal file
68
tests/test_persistent_learning.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""Tests for PersistentLearningStore."""
|
||||
|
||||
import tempfile
|
||||
|
||||
from fusionagi.governance.adaptive_ethics import AdaptiveEthics
|
||||
from fusionagi.governance.consequence_engine import ConsequenceEngine
|
||||
from fusionagi.memory.persistent_learning import PersistentLearningStore
|
||||
|
||||
|
||||
def test_save_and_load_consequences() -> None:
    """Round-trip a recorded consequence through the persistent store."""
    with tempfile.TemporaryDirectory() as workdir:
        source_engine = ConsequenceEngine()
        source_engine.record_choice(
            choice_id="c1",
            actor="test",
            action_taken="act1",
            estimated_risk=0.3,
            estimated_reward=0.7,
        )
        source_engine.record_consequence(
            "c1", outcome_positive=True, actual_risk_realized=0.1, actual_reward_gained=0.8
        )

        store = PersistentLearningStore(data_dir=workdir)
        saved_path = store.save_consequences(source_engine)
        assert saved_path.endswith("consequences.json")

        # Loading into a fresh engine restores the single recorded entry.
        restored_count = store.load_consequences(ConsequenceEngine())
        assert restored_count == 1
|
||||
|
||||
|
||||
def test_save_and_load_ethics() -> None:
    """Round-trip an ethical experience through the persistent store."""
    with tempfile.TemporaryDirectory() as workdir:
        source_ethics = AdaptiveEthics()
        source_ethics.record_experience(
            action_type="file_read",
            context_summary="reading file outside scope",
            advisory_reason="out of scope",
            proceeded=True,
            outcome_positive=True,
        )

        store = PersistentLearningStore(data_dir=workdir)
        saved_path = store.save_ethics(source_ethics)
        assert saved_path.endswith("ethics.json")

        # Loading into a fresh ethics instance restores the single lesson.
        restored_count = store.load_ethics(AdaptiveEthics())
        assert restored_count == 1
|
||||
|
||||
|
||||
def test_save_risk_histories() -> None:
    """Risk histories are written to the expected JSON file."""
    with tempfile.TemporaryDirectory() as workdir:
        engine = ConsequenceEngine()
        engine.record_choice(
            "c1", actor="t", action_taken="act1", estimated_risk=0.5, estimated_reward=0.5
        )
        engine.record_consequence(
            "c1", outcome_positive=True, actual_risk_realized=0.2, actual_reward_gained=0.8
        )

        saved_path = PersistentLearningStore(data_dir=workdir).save_risk_histories(engine)
        assert saved_path.endswith("risk_histories.json")
|
||||
|
||||
|
||||
def test_load_nonexistent_returns_zero() -> None:
    """Loading from an empty data dir restores nothing and does not raise."""
    with tempfile.TemporaryDirectory() as workdir:
        store = PersistentLearningStore(data_dir=workdir)
        assert store.load_consequences(ConsequenceEngine()) == 0
        assert store.load_ethics(AdaptiveEthics()) == 0
|
||||
Reference in New Issue
Block a user