Items completed: 1. Merged PR #2 (starlette/httpx deps) 2. Fixed async race condition in multimodal_ui.py 3. Wired TTSAdapter (ElevenLabs, Azure) in API routes 4. Moved super_big_brain.py from core/ to reasoning/ (backward compat shim) 5. Added API authentication middleware (Bearer token via FUSIONAGI_API_KEY) 6. Added async adapter interface (acomplete/acomplete_structured) 7. Migrated FastAPI on_event to lifespan (fixes 20 deprecation warnings) 8. Liquid Neural Networks (continuous-time adaptive weights) 9. Quantum-AI Hybrid compute backend (simulator + optimization) 10. Embodied Intelligence / Robotics bridge (actuator + sensor protocols) 11. Consciousness Engineering (formal self-model with introspection) 12. ASI Scoring Rubric (C/A/L/N/R self-assessment harness) 13. GPU integration tests for TensorFlow backend 14. Multi-stage production Dockerfile 15. Gitea CI/CD pipeline (lint, test matrix, Docker build) 16. API rate limiting middleware (per-IP sliding window) 17. OpenAPI docs cleanup (auth + rate limiting descriptions) 18. Benchmarking suite (decomposition, multi-path, recomposition, e2e) 19. Plugin system (head registry for custom heads) 427 tests passing, 0 ruff errors, 0 mypy errors. Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
139 lines
5.3 KiB
Python
"""Super Big Brain orchestrator: tokenless, recursive, graph-backed reasoning."""
|
|
|
|
from __future__ import annotations
|
|
|
|
from dataclasses import dataclass
|
|
from typing import Any
|
|
|
|
from fusionagi._logger import logger
|
|
from fusionagi.memory.semantic_graph import SemanticGraphMemory
|
|
from fusionagi.memory.sharding import shard_context
|
|
from fusionagi.reasoning.context_loader import build_compact_prompt, load_context_for_reasoning
|
|
from fusionagi.reasoning.decomposition import decompose_recursive
|
|
from fusionagi.reasoning.gpu_scoring import generate_and_score_gpu
|
|
from fusionagi.reasoning.meta_reasoning import challenge_assumptions, detect_contradictions
|
|
from fusionagi.reasoning.multi_path import generate_and_score_parallel
|
|
from fusionagi.reasoning.recomposition import RecomposedResponse, recompose
|
|
from fusionagi.reasoning.tot import ThoughtNode, expand_node, prune_subtree
|
|
from fusionagi.schemas.grounding import Citation
|
|
from fusionagi.schemas.head import HeadClaim, HeadId, HeadOutput, HeadRisk
|
|
|
|
|
|
@dataclass
class SuperBigBrainConfig:
    """Tunable knobs for the Super Big Brain reasoning pipeline."""

    # How many levels of recursive prompt decomposition to attempt.
    max_decomposition_depth: int = 3
    # Expand the winning thought at least this deep before concluding.
    min_depth_before_conclusion: int = 1
    # Number of candidate hypotheses scored side by side.
    parallel_hypotheses: int = 3
    # Thought subtrees scoring below this value are pruned.
    prune_threshold: float = 0.3
    # Upper bound on the compact context prompt, in characters.
    max_context_chars: int = 4000
    # Prefer the GPU scoring path over the parallel CPU path.
    use_gpu: bool = True
|
|
|
|
|
|
def run_super_big_brain(
    prompt: str,
    semantic_graph: SemanticGraphMemory,
    config: SuperBigBrainConfig | None = None,
    adapter: Any | None = None,
) -> RecomposedResponse:
    """Run the end-to-end Super Big Brain reasoning pipeline over *prompt*.

    Stages:
        1. Recursively decompose the prompt into atomic units.
        2. Persist units/relations to the semantic graph and load sharded context.
        3. Score candidate hypotheses via the GPU or parallel CPU path.
        4. Expand the best thought to the configured minimum depth, then prune.
        5. Run meta-reasoning checks and recompose a traceable response.

    Args:
        prompt: Raw user prompt to reason over.
        semantic_graph: Graph memory that receives the decomposition.
        config: Pipeline tuning; defaults to a fresh ``SuperBigBrainConfig``.
        adapter: Reserved for adapter injection; currently unused in this body.

    Returns:
        A ``RecomposedResponse`` carrying summary, confidence, and metadata
        (``assumptions_flagged``, ``contradictions``, ``depth``).
    """
    settings = config or SuperBigBrainConfig()

    decomposition = decompose_recursive(prompt, max_depth=settings.max_decomposition_depth)
    if not decomposition.units:
        # Nothing to reason over — short-circuit with an empty-confidence response.
        return RecomposedResponse(summary="No content to reason over.", confidence=0.0)

    semantic_graph.ingest_decomposition(decomposition.units, decomposition.relations)
    load_context_for_reasoning(decomposition.units, semantic_graph=semantic_graph, sharder=shard_context)  # type: ignore[arg-type]
    compact_context = build_compact_prompt(decomposition.units, max_chars=settings.max_context_chars)

    # Seed hypotheses from the leading units; fall back to the compact context.
    candidates = [unit.content for unit in decomposition.units[:settings.parallel_hypotheses] if unit.content]
    if not candidates:
        candidates = [compact_context[:500]]

    # Both scorers share the same signature; pick one up front.
    scorer = generate_and_score_gpu if settings.use_gpu else generate_and_score_parallel
    scored = scorer(candidates, decomposition.units)

    ranked = [node for node, _ in sorted(scored, key=lambda pair: pair[1], reverse=True)]
    if ranked:
        top = ranked[0]
    else:
        # No scored nodes — synthesize a placeholder thought from the context.
        top = ThoughtNode(thought=compact_context[:300], unit_refs=[unit.unit_id for unit in decomposition.units[:5]])

    # Grow the winning thought once if it has not yet reached the minimum depth.
    if settings.min_depth_before_conclusion > 0 and top.depth < settings.min_depth_before_conclusion:
        expanded = expand_node(top, compact_context[:200], unit_refs=top.unit_refs)
        expanded.score = top.score
        top = expanded

    prune_subtree(top, settings.prune_threshold)
    flagged_assumptions = challenge_assumptions(decomposition.units, top.thought)
    found_contradictions = detect_contradictions(decomposition.units)

    response = recompose([top], decomposition.units)
    response.metadata["assumptions_flagged"] = len(flagged_assumptions)
    response.metadata["contradictions"] = len(found_contradictions)
    response.metadata["depth"] = top.depth

    logger.info(
        "Super Big Brain complete",
        extra={"units": len(decomposition.units), "confidence": response.confidence},
    )
    return response
|
|
|
|
|
|
def _recomposed_to_head_output(
    recomp: RecomposedResponse,
    head_id: HeadId,
) -> HeadOutput:
    """Translate a ``RecomposedResponse`` into a ``HeadOutput`` for Dvādaśa.

    Up to five key claims are converted, each backed by citations drawn from
    the first three referenced units at the response's overall confidence.
    When no key claims exist, the summary itself becomes a single
    evidence-free claim. Risk entries are attached when the metadata records
    flagged assumptions (medium severity) or contradictions (high severity).
    """
    claims = []
    for claim_text in recomp.key_claims[:5]:
        # Build a fresh evidence list per claim so no Citation objects are shared.
        evidence = [
            Citation(source_id=uid, excerpt="", confidence=recomp.confidence)
            for uid in recomp.unit_refs[:3]
        ]
        claims.append(
            HeadClaim(
                claim_text=claim_text,
                confidence=recomp.confidence,
                evidence=evidence,
                assumptions=[],
            )
        )
    if not claims:
        claims = [
            HeadClaim(claim_text=recomp.summary, confidence=recomp.confidence, evidence=[], assumptions=[]),
        ]

    risks = []
    meta = recomp.metadata
    if meta.get("assumptions_flagged", 0) > 0:
        risks.append(HeadRisk(description="Assumptions flagged; verify before acting", severity="medium"))
    if meta.get("contradictions", 0) > 0:
        risks.append(HeadRisk(description="Contradictions detected in context", severity="high"))

    return HeadOutput(
        head_id=head_id,
        summary=recomp.summary,
        claims=claims,
        risks=risks,
        questions=[],
        recommended_actions=["Consider flagged assumptions", "Resolve contradictions if any"],
        tone_guidance="",
    )
|
|
|
|
|
|
class SuperBigBrainReasoningProvider:
    """ReasoningProvider adapter that delegates to the Super Big Brain pipeline.

    Holds a shared semantic graph and pipeline configuration so repeated
    calls to :meth:`produce_head_output` accumulate state in one graph.
    """

    def __init__(
        self,
        semantic_graph: SemanticGraphMemory | None = None,
        config: SuperBigBrainConfig | None = None,
    ) -> None:
        # Truthiness-based fallbacks (mirrors `x or default`): a falsy graph
        # or config is replaced with a freshly constructed default.
        self._graph = semantic_graph if semantic_graph else SemanticGraphMemory()
        self._config = config if config else SuperBigBrainConfig()

    def produce_head_output(self, head_id: HeadId, prompt: str) -> HeadOutput:
        """Run the pipeline on *prompt* and wrap the result as a ``HeadOutput``."""
        response = run_super_big_brain(prompt, self._graph, self._config)
        return _recomposed_to_head_output(response, head_id)
|