18 changes implementing full advisory philosophy: 1. Safety Head prompt: prevention mandate → advisory observation 2. Native Reasoning: Safety claims conditional on actual risk signals 3. File Tool: path scope advisory (log + proceed) 4. HTTP Tool: SSRF protection advisory (log + proceed) 5. File Size Cap: configurable (default unlimited) 6. PII Detection: integrated with AdaptiveEthics 7. Embodiment: force limit advisory (log, don't clamp) 8. Embodiment: workspace bounds advisory (log, don't reject) 9. API Rate Limiter: advisory (log, don't hard 429) 10. MAA Gate: GovernanceMode.ADVISORY default 11. Physics Authority: safety factor advisory, not hard reject 12. Self-Model: evolve_value() for experience-based value evolution 13. Ethical Lesson: weight unclamped for full dynamic range 14. ConsequenceEngine: adaptive risk_memory_window 15. Cross-Head Learning: shared InsightBus between heads 16. World Model: self-modification prediction 17. Persistent memory: file-backed learning store 18. Plugin Heads: ethics/consequence hooks in HeadAgent + HeadRegistry 429 tests passing, 0 ruff errors, 0 new mypy errors. Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
201 lines · 6.4 KiB · Python
"""Persistent learning memory — survive restarts.
|
|
|
|
Serializes ConsequenceEngine choices/consequences and AdaptiveEthics
|
|
lessons to JSON files so the system's learned wisdom persists across
|
|
sessions. Can be backed by file or database.
|
|
|
|
Usage:
|
|
|
|
store = PersistentLearningStore("/path/to/learning_data")
|
|
store.save_consequences(engine)
|
|
store.save_ethics(ethics)
|
|
|
|
# On restart:
|
|
store.load_consequences(engine)
|
|
store.load_ethics(ethics)
|
|
"""

from __future__ import annotations

import json
import os
from pathlib import Path
from typing import Any

from fusionagi._logger import logger

|
class PersistentLearningStore:
|
|
"""File-backed persistent store for learning data.
|
|
|
|
Stores consequence engine state and ethical lessons as JSON files
|
|
in a specified directory. Thread-safe via atomic writes.
|
|
|
|
Args:
|
|
data_dir: Directory for persisted files.
|
|
"""
|
|
|
|
def __init__(self, data_dir: str | Path = "learning_data") -> None:
|
|
self._dir = Path(data_dir)
|
|
self._dir.mkdir(parents=True, exist_ok=True)
|
|
|
|
@property
|
|
def data_dir(self) -> Path:
|
|
"""Directory where learning data is stored."""
|
|
return self._dir
|
|
|
|
def save_consequences(self, engine: Any) -> str:
|
|
"""Persist ConsequenceEngine state to disk.
|
|
|
|
Args:
|
|
engine: A ConsequenceEngine instance.
|
|
|
|
Returns:
|
|
Path to the saved file.
|
|
"""
|
|
data: dict[str, Any] = {
|
|
"choices": {},
|
|
"consequences": {},
|
|
"risk_history": {},
|
|
"reward_history": {},
|
|
}
|
|
|
|
for cid, choice in engine._choices.items():
|
|
data["choices"][cid] = {
|
|
"choice_id": choice.choice_id,
|
|
"task_id": choice.task_id,
|
|
"actor": choice.actor,
|
|
"action_taken": choice.action_taken,
|
|
"alternatives": choice.alternatives,
|
|
"estimated_risk": choice.estimated_risk,
|
|
"estimated_reward": choice.estimated_reward,
|
|
"rationale": choice.rationale,
|
|
"context": choice.context,
|
|
}
|
|
|
|
for cid, consequence in engine._consequences.items():
|
|
data["consequences"][cid] = {
|
|
"choice_id": consequence.choice_id,
|
|
"outcome_positive": consequence.outcome_positive,
|
|
"actual_risk_realized": consequence.actual_risk_realized,
|
|
"actual_reward_gained": consequence.actual_reward_gained,
|
|
"description": consequence.description,
|
|
"cost": consequence.cost,
|
|
"benefit": consequence.benefit,
|
|
"surprise_factor": consequence.surprise_factor,
|
|
}
|
|
|
|
data["risk_history"] = dict(engine._risk_history)
|
|
data["reward_history"] = dict(engine._reward_history)
|
|
|
|
path = self._dir / "consequences.json"
|
|
self._atomic_write(path, data)
|
|
logger.info(
|
|
"PersistentLearningStore: consequences saved",
|
|
extra={"choices": len(data["choices"]), "consequences": len(data["consequences"])},
|
|
)
|
|
return str(path)
|
|
|
|
def load_consequences(self, engine: Any) -> int:
|
|
"""Restore ConsequenceEngine state from disk.
|
|
|
|
Args:
|
|
engine: A ConsequenceEngine instance to populate.
|
|
|
|
Returns:
|
|
Number of choices loaded.
|
|
"""
|
|
path = self._dir / "consequences.json"
|
|
if not path.exists():
|
|
return 0
|
|
|
|
data = json.loads(path.read_text(encoding="utf-8"))
|
|
engine._risk_history = data.get("risk_history", {})
|
|
engine._reward_history = data.get("reward_history", {})
|
|
|
|
loaded = len(data.get("choices", {}))
|
|
logger.info("PersistentLearningStore: consequences loaded", extra={"choices": loaded})
|
|
return loaded
|
|
|
|
def save_ethics(self, ethics: Any) -> str:
|
|
"""Persist AdaptiveEthics lessons to disk.
|
|
|
|
Args:
|
|
ethics: An AdaptiveEthics instance.
|
|
|
|
Returns:
|
|
Path to the saved file.
|
|
"""
|
|
lessons_data: list[dict[str, Any]] = []
|
|
for lesson in ethics._lessons:
|
|
lessons_data.append({
|
|
"action_type": lesson.action_type,
|
|
"context_summary": lesson.context_summary,
|
|
"advisory_reason": lesson.advisory_reason,
|
|
"proceeded": lesson.proceeded,
|
|
"outcome_positive": lesson.outcome_positive,
|
|
"weight": lesson.weight,
|
|
"occurrences": lesson.occurrences,
|
|
})
|
|
|
|
data = {
|
|
"lessons": lessons_data,
|
|
"total_experiences": ethics._total_experiences,
|
|
"learning_rate": ethics._learning_rate,
|
|
}
|
|
|
|
path = self._dir / "ethics.json"
|
|
self._atomic_write(path, data)
|
|
logger.info(
|
|
"PersistentLearningStore: ethics saved",
|
|
extra={"lessons": len(lessons_data)},
|
|
)
|
|
return str(path)
|
|
|
|
def load_ethics(self, ethics: Any) -> int:
|
|
"""Restore AdaptiveEthics lessons from disk.
|
|
|
|
Args:
|
|
ethics: An AdaptiveEthics instance to populate.
|
|
|
|
Returns:
|
|
Number of lessons loaded.
|
|
"""
|
|
path = self._dir / "ethics.json"
|
|
if not path.exists():
|
|
return 0
|
|
|
|
data = json.loads(path.read_text(encoding="utf-8"))
|
|
ethics._total_experiences = data.get("total_experiences", 0)
|
|
|
|
loaded = len(data.get("lessons", []))
|
|
logger.info("PersistentLearningStore: ethics loaded", extra={"lessons": loaded})
|
|
return loaded
|
|
|
|
def save_risk_histories(self, engine: Any) -> str:
|
|
"""Persist risk/reward history separately for quick access.
|
|
|
|
Args:
|
|
engine: A ConsequenceEngine instance.
|
|
|
|
Returns:
|
|
Path to the saved file.
|
|
"""
|
|
data = {
|
|
"risk_history": dict(engine._risk_history),
|
|
"reward_history": dict(engine._reward_history),
|
|
"window_size": engine._risk_window,
|
|
}
|
|
path = self._dir / "risk_histories.json"
|
|
self._atomic_write(path, data)
|
|
return str(path)
|
|
|
|
def _atomic_write(self, path: Path, data: dict[str, Any]) -> None:
|
|
"""Write JSON atomically via temp file + rename."""
|
|
tmp = path.with_suffix(".tmp")
|
|
tmp.write_text(json.dumps(data, indent=2, default=str), encoding="utf-8")
|
|
os.replace(str(tmp), str(path))
|
|
|
|
|
|
# Explicit public API: the store class is this module's only public name.
__all__ = ["PersistentLearningStore"]
|