Items completed: 1. Merged PR #2 (starlette/httpx deps) 2. Fixed async race condition in multimodal_ui.py 3. Wired TTSAdapter (ElevenLabs, Azure) in API routes 4. Moved super_big_brain.py from core/ to reasoning/ (backward compat shim) 5. Added API authentication middleware (Bearer token via FUSIONAGI_API_KEY) 6. Added async adapter interface (acomplete/acomplete_structured) 7. Migrated FastAPI on_event to lifespan (fixes 20 deprecation warnings) 8. Liquid Neural Networks (continuous-time adaptive weights) 9. Quantum-AI Hybrid compute backend (simulator + optimization) 10. Embodied Intelligence / Robotics bridge (actuator + sensor protocols) 11. Consciousness Engineering (formal self-model with introspection) 12. ASI Scoring Rubric (C/A/L/N/R self-assessment harness) 13. GPU integration tests for TensorFlow backend 14. Multi-stage production Dockerfile 15. Gitea CI/CD pipeline (lint, test matrix, Docker build) 16. API rate limiting middleware (per-IP sliding window) 17. OpenAPI docs cleanup (auth + rate limiting descriptions) 18. Benchmarking suite (decomposition, multi-path, recomposition, e2e) 19. Plugin system (head registry for custom heads) 427 tests passing, 0 ruff errors, 0 mypy errors. Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
98 lines
2.9 KiB
Python
98 lines
2.9 KiB
Python
"""Abstract LLM adapter interface; model-agnostic for orchestrator and agents."""
|
|
|
|
import asyncio
from abc import ABC, abstractmethod
from typing import Any
|
|
|
|
|
|
class LLMAdapter(ABC):
|
|
"""Abstract adapter for LLM completion.
|
|
|
|
Implementations should handle:
|
|
- openai/ - OpenAI API (GPT-4, etc.)
|
|
- anthropic/ - Anthropic API (Claude, etc.)
|
|
- local/ - Local models (Ollama, etc.)
|
|
"""
|
|
|
|
@abstractmethod
|
|
def complete(
|
|
self,
|
|
messages: list[dict[str, str]],
|
|
**kwargs: Any,
|
|
) -> str:
|
|
"""Return completion text for the given messages.
|
|
|
|
Args:
|
|
messages: List of message dicts with 'role' and 'content' keys.
|
|
**kwargs: Provider-specific options (e.g., temperature, max_tokens).
|
|
|
|
Returns:
|
|
The model's response text.
|
|
"""
|
|
...
|
|
|
|
def complete_structured(
|
|
self,
|
|
messages: list[dict[str, str]],
|
|
schema: dict[str, Any] | None = None,
|
|
**kwargs: Any,
|
|
) -> Any:
|
|
"""Return structured (JSON) output.
|
|
|
|
Default implementation returns None; subclasses may override to use
|
|
provider-specific JSON modes (e.g., OpenAI's response_format).
|
|
|
|
Args:
|
|
messages: List of message dicts with 'role' and 'content' keys.
|
|
schema: Optional JSON schema for response validation.
|
|
**kwargs: Provider-specific options.
|
|
|
|
Returns:
|
|
Parsed JSON response or None if not supported/parsing fails.
|
|
"""
|
|
return None
|
|
|
|
async def acomplete(
|
|
self,
|
|
messages: list[dict[str, str]],
|
|
**kwargs: Any,
|
|
) -> str:
|
|
"""Async completion — default wraps sync ``complete()`` in a thread.
|
|
|
|
Subclasses with native async support (e.g., httpx-based providers)
|
|
should override this for true non-blocking I/O.
|
|
|
|
Args:
|
|
messages: List of message dicts with 'role' and 'content' keys.
|
|
**kwargs: Provider-specific options.
|
|
|
|
Returns:
|
|
The model's response text.
|
|
"""
|
|
import asyncio
|
|
|
|
loop = asyncio.get_running_loop()
|
|
return await loop.run_in_executor(None, lambda: self.complete(messages, **kwargs))
|
|
|
|
async def acomplete_structured(
|
|
self,
|
|
messages: list[dict[str, str]],
|
|
schema: dict[str, Any] | None = None,
|
|
**kwargs: Any,
|
|
) -> Any:
|
|
"""Async structured completion — default wraps sync version.
|
|
|
|
Args:
|
|
messages: List of message dicts with 'role' and 'content' keys.
|
|
schema: Optional JSON schema for response validation.
|
|
**kwargs: Provider-specific options.
|
|
|
|
Returns:
|
|
Parsed JSON response or None.
|
|
"""
|
|
import asyncio
|
|
|
|
loop = asyncio.get_running_loop()
|
|
return await loop.run_in_executor(
|
|
None, lambda: self.complete_structured(messages, schema=schema, **kwargs)
|
|
)
|