"""Tests for the consequence engine and choice→consequence→learning loop."""

from fusionagi.governance import Alternative, ConsequenceEngine
from fusionagi.governance.audit_log import AuditLog
from fusionagi.schemas.audit import AuditEventType


class TestConsequenceEngine:
    """Test consequence tracking and risk/reward estimation."""

    def test_record_choice(self) -> None:
        """Recording a choice stores its fields and bumps the choice counter."""
        ce = ConsequenceEngine()
        choice = ce.record_choice(
            choice_id="c1",
            actor="planner",
            action_taken="use_tool_x",
            estimated_risk=0.3,
            estimated_reward=0.7,
            rationale="Tool X is the best fit",
        )
        assert choice.choice_id == "c1"
        assert choice.estimated_risk == 0.3
        assert ce.total_choices == 1

    def test_record_consequence(self) -> None:
        """A consequence linked to an existing choice is stored and counted."""
        ce = ConsequenceEngine()
        ce.record_choice(choice_id="c1", actor="planner", action_taken="act")
        consequence = ce.record_consequence(
            choice_id="c1",
            outcome_positive=True,
            actual_risk_realized=0.1,
            actual_reward_gained=0.9,
            description="Action succeeded",
        )
        assert consequence is not None
        assert consequence.outcome_positive is True
        assert ce.total_consequences == 1

    def test_consequence_not_found(self) -> None:
        """Recording a consequence for an unknown choice_id returns None."""
        ce = ConsequenceEngine()
        result = ce.record_consequence(choice_id="nonexistent", outcome_positive=True)
        assert result is None

    def test_surprise_factor(self) -> None:
        """A large estimate/outcome mismatch yields a high surprise factor."""
        ce = ConsequenceEngine()
        # Estimated low risk / high reward, but the outcome was the opposite.
        ce.record_choice(
            choice_id="c1",
            actor="exec",
            action_taken="risky_op",
            estimated_risk=0.1,
            estimated_reward=0.9,
        )
        consequence = ce.record_consequence(
            choice_id="c1",
            outcome_positive=False,
            actual_risk_realized=0.9,
            actual_reward_gained=0.1,
        )
        assert consequence is not None
        assert consequence.surprise_factor > 0.5

    def test_estimate_risk_reward_no_history(self) -> None:
        """With no prior observations, the estimate has minimal confidence."""
        ce = ConsequenceEngine()
        estimate = ce.estimate_risk_reward("unknown_action")
        assert estimate["observations"] == 0
        assert estimate["confidence"] == 0.1

    def test_estimate_risk_reward_with_history(self) -> None:
        """Estimates converge on the observed risk/reward across repeats."""
        ce = ConsequenceEngine()
        for i in range(5):
            ce.record_choice(f"c{i}", "exec", "tool_call")
            ce.record_consequence(
                f"c{i}",
                outcome_positive=True,
                actual_risk_realized=0.2,
                actual_reward_gained=0.8,
            )
        estimate = ce.estimate_risk_reward("tool_call")
        assert estimate["observations"] == 5
        assert abs(estimate["expected_risk"] - 0.2) < 0.01
        assert abs(estimate["expected_reward"] - 0.8) < 0.01

    def test_alternatives_recorded(self) -> None:
        """Alternatives passed with a choice are preserved in order."""
        ce = ConsequenceEngine()
        alts = [
            Alternative(action="alt_a", estimated_risk=0.6, reason_not_chosen="Too risky"),
            Alternative(action="alt_b", estimated_risk=0.2, reason_not_chosen="Lower reward"),
        ]
        choice = ce.record_choice(
            choice_id="c1",
            actor="planner",
            action_taken="chosen_action",
            alternatives=alts,
        )
        assert len(choice.alternatives) == 2
        assert choice.alternatives[0].reason_not_chosen == "Too risky"

    def test_get_summary(self) -> None:
        """Summary aggregates choice/consequence totals and outcome counts."""
        ce = ConsequenceEngine()
        ce.record_choice("c1", "exec", "action_a")
        ce.record_consequence("c1", True, 0.1, 0.9)
        ce.record_choice("c2", "exec", "action_a")
        ce.record_consequence("c2", False, 0.8, 0.1)
        summary = ce.get_summary()
        assert summary["total_choices"] == 2
        assert summary["total_consequences"] == 2
        assert summary["positive_outcomes"] == 1
        assert summary["negative_outcomes"] == 1

    def test_audit_log_integration(self) -> None:
        """Choices and consequences emit matching audit-log events."""
        audit = AuditLog()
        ce = ConsequenceEngine(audit_log=audit)
        ce.record_choice("c1", "exec", "action")
        ce.record_consequence("c1", True)
        choices = audit.get_by_type(AuditEventType.CHOICE)
        consequences = audit.get_by_type(AuditEventType.CONSEQUENCE)
        assert len(choices) == 1
        assert len(consequences) == 1