Next-level improvements: 15 items across backend, frontend, and testing
Backend: - SQLiteStateBackend: persistent task/trace storage with SQLite - InMemoryStateBackend: in-memory impl of StateBackend interface - Redis cache backend (CacheBackend ABC + MemoryCacheBackend + RedisCacheBackend) - OpenAI adapter: async acomplete() with retry logic - Per-tenant + per-IP rate limiting in middleware Frontend: - State management: useStore + useAppState (zero-dep, context + reducer) - React Router integration: URL-based navigation (usePageNavigation) - WebSocket streaming: sendPrompt + StreamCallbacks for token-by-token updates - File preview: inline image/text/binary preview with expand/collapse - Sparkline charts + MetricCard + BarChart for dashboard visualization - Push notifications hook (useNotifications) with browser Notification API - i18n system: 6 locales (en, es, fr, de, ja, zh) with interpolation - 6 new Storybook stories (ChatMessage, Skeleton, Markdown, SearchFilter, Toast, FilePreview) Testing: - Playwright E2E config + 6 browser specs (desktop + mobile) - 18 new Python tests (SQLiteStateBackend, InMemoryStateBackend, cache backends) 570 Python tests + 45 frontend tests = 615 total, 0 ruff errors. Co-Authored-By: Nakamoto, S <defi@defi-oracle.io>
This commit is contained in:
@@ -213,6 +213,57 @@ class OpenAIAdapter(LLMAdapter):
|
||||
raise self._classify_error(last_error) from last_error
|
||||
raise OpenAIAdapterError("All retries exhausted with unknown error")
|
||||
|
||||
async def acomplete(
    self,
    messages: list[dict[str, str]],
    **kwargs: Any,
) -> str:
    """Async version of complete using OpenAI's async client.

    Mirrors the synchronous retry loop: transient errors are retried with
    exponential backoff up to ``self._max_retries`` additional attempts.

    Args:
        messages: List of message dicts with 'role' and 'content'.
        **kwargs: Additional arguments for the API call. ``model`` may be
            passed to override the adapter's default model.

    Returns:
        The assistant's response content, or "" for empty ``messages``.

    Raises:
        ImportError: If the ``openai`` package is not installed.
        OpenAIAdapterError: When all retries are exhausted (classified via
            ``_classify_error`` when a concrete error was captured).
    """
    import asyncio

    if not messages:
        return ""

    try:
        import openai
    except ImportError as e:
        raise ImportError("Install with: pip install fusionagi[openai]") from e

    async_client = openai.AsyncOpenAI(api_key=self._api_key, **self._client_kwargs)
    model = kwargs.pop("model", self._model)
    last_error: Exception | None = None
    delay = self._retry_delay

    try:
        for attempt in range(self._max_retries + 1):
            try:
                response = await async_client.chat.completions.create(
                    model=model, messages=messages, **kwargs  # type: ignore[arg-type]
                )
                return response.choices[0].message.content or ""
            except Exception as e:
                last_error = e
                # Give up on non-retryable errors or when attempts are spent.
                if not self._is_retryable_error(e) or attempt == self._max_retries:
                    break
                logger.warning(
                    "OpenAI async retry",
                    extra={"attempt": attempt + 1, "error": str(e), "delay": delay},
                )
                await asyncio.sleep(delay)
                delay = min(delay * self._retry_multiplier, self._max_retry_delay)
    finally:
        # Fix: the client (and its underlying httpx connection pool) was
        # previously never closed, leaking sockets on every call.
        await async_client.close()

    if last_error is not None:
        raise self._classify_error(last_error) from last_error
    raise OpenAIAdapterError("All retries exhausted")
|
||||
|
||||
def complete_structured(
|
||||
self,
|
||||
messages: list[dict[str, str]],
|
||||
|
||||
@@ -93,23 +93,39 @@ def create_app(
|
||||
_buckets: dict[str, list[float]] = defaultdict(list)
|
||||
|
||||
class RateLimitMiddleware(BaseHTTPMiddleware):
    """Per-tenant + per-IP sliding window rate limiter (advisory mode).

    Tracks both IP-level and tenant-level request rates. Logs exceedances
    but allows requests through (advisory governance).

    NOTE(review): ``_buckets`` keys for idle IPs/tenants are never removed,
    so the dict grows unboundedly under churn — consider periodic pruning.
    """

    async def dispatch(self, request: Request, call_next: Any) -> Response:
        client_ip = request.client.host if request.client else "unknown"
        tenant_id = request.headers.get("x-tenant-id", "default")
        now = time.monotonic()
        cutoff = now - rate_window

        # Per-IP tracking: drop timestamps outside the sliding window.
        ip_key = f"ip:{client_ip}"
        _buckets[ip_key] = [t for t in _buckets[ip_key] if t > cutoff]
        if len(_buckets[ip_key]) >= rate_limit:
            logger.info(
                "API rate limit advisory: IP limit exceeded (proceeding)",
                extra={"client_ip": client_ip, "count": len(_buckets[ip_key]), "limit": rate_limit},
            )

        # Per-tenant tracking (separate quota)
        tenant_key = f"tenant:{tenant_id}"
        tenant_limit = rate_limit * 5  # tenants get 5x the per-IP limit
        _buckets[tenant_key] = [t for t in _buckets[tenant_key] if t > cutoff]
        if len(_buckets[tenant_key]) >= tenant_limit:
            logger.info(
                "API rate limit advisory: tenant limit exceeded (proceeding)",
                extra={"tenant_id": tenant_id, "count": len(_buckets[tenant_key]), "limit": tenant_limit},
            )

        # Record this request against both quotas, then pass it through
        # (advisory mode: never blocks).
        _buckets[ip_key].append(now)
        _buckets[tenant_key].append(now)
        return await call_next(request)  # type: ignore[no-any-return]


app.add_middleware(RateLimitMiddleware)
|
||||
|
||||
@@ -1,20 +1,176 @@
|
||||
"""In-memory response cache with TTL for the FusionAGI API."""
|
||||
"""Response cache with TTL for the FusionAGI API.
|
||||
|
||||
Provides both in-memory and Redis-backed implementations with a common interface.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any
|
||||
|
||||
from fusionagi._logger import logger
|
||||
|
||||
|
||||
class CacheBackend(ABC):
    """Abstract cache backend interface.

    Implementations provide keyed storage with per-entry TTL semantics;
    see MemoryCacheBackend and RedisCacheBackend in this module.
    """

    @abstractmethod
    def get(self, key: str) -> Any | None:
        """Get value by key, or None if missing/expired."""
        ...

    @abstractmethod
    def set(self, key: str, value: Any, ttl: float | None = None) -> None:
        """Set key/value with optional TTL (None means backend default)."""
        ...

    @abstractmethod
    def delete(self, key: str) -> bool:
        """Delete a key. Returns True if existed."""
        ...

    @abstractmethod
    def clear(self) -> int:
        """Clear all entries. Returns count cleared."""
        ...

    @abstractmethod
    def stats(self) -> dict[str, Any]:
        """Return backend stats (shape is backend-specific)."""
        ...
|
||||
|
||||
|
||||
class MemoryCacheBackend(CacheBackend):
    """In-memory cache with TTL and oldest-first eviction.

    Entries are ``key -> (set_time, ttl, value)``. Expiry is lazy (checked
    on read). Not thread-safe; guard externally if shared across threads.
    """

    def __init__(self, max_size: int = 1000, default_ttl: float = 300.0) -> None:
        # key -> (set_time, ttl, value)
        self._cache: dict[str, tuple[float, float, Any]] = {}
        self._max_size = max_size
        self._default_ttl = default_ttl

    def get(self, key: str) -> Any | None:
        """Return the cached value, purging and missing expired entries."""
        entry = self._cache.get(key)
        if entry is None:
            return None
        set_time, ttl, value = entry
        if time.time() - set_time > ttl:
            # Lazy expiry: drop on read.
            del self._cache[key]
            return None
        return value

    def set(self, key: str, value: Any, ttl: float | None = None) -> None:
        """Store *key* with an optional TTL override.

        Fixes over the previous version:
        - ``ttl or self._default_ttl`` silently replaced an explicit
          ``ttl=0`` with the default; only ``None`` now means "default".
        - Overwriting an existing key no longer evicts an unrelated entry.
        - Already-expired entries are evicted before live ones.
        """
        if key not in self._cache and len(self._cache) >= self._max_size:
            now = time.time()
            expired = [k for k, (st, t, _) in self._cache.items() if now - st > t]
            if expired:
                victim = expired[0]
            else:
                victim = min(self._cache, key=lambda k: self._cache[k][0])
            del self._cache[victim]
        effective_ttl = self._default_ttl if ttl is None else ttl
        self._cache[key] = (time.time(), effective_ttl, value)

    def delete(self, key: str) -> bool:
        """Remove *key*. Returns True if it existed."""
        return self._cache.pop(key, None) is not None

    def clear(self) -> int:
        """Drop all entries. Returns the number dropped."""
        count = len(self._cache)
        self._cache.clear()
        return count

    def stats(self) -> dict[str, Any]:
        """Counts of stored vs. still-unexpired entries plus the size cap."""
        now = time.time()
        active = sum(1 for st, ttl, _ in self._cache.values() if now - st <= ttl)
        return {"backend": "memory", "total": len(self._cache), "active": active, "max_size": self._max_size}
|
||||
|
||||
|
||||
class RedisCacheBackend(CacheBackend):
    """Redis-backed cache. Requires the ``redis`` package.

    Degrades gracefully: if the package is missing or the server is
    unreachable at construction time, every operation becomes a no-op
    instead of raising.
    """

    def __init__(self, redis_url: str = "redis://localhost:6379/0", default_ttl: float = 300.0) -> None:
        self._default_ttl = default_ttl
        # Namespace prefix so clear() cannot touch unrelated keys.
        self._prefix = "fusionagi:cache:"
        self._redis: Any = None
        try:
            import redis

            self._redis = redis.from_url(redis_url, decode_responses=True)
            self._redis.ping()
            logger.info("Redis cache connected", extra={"url": redis_url})
        except Exception as e:
            logger.warning("Redis unavailable, cache operations will be no-ops", extra={"error": str(e)})
            self._redis = None

    @property
    def available(self) -> bool:
        """Check if Redis is connected."""
        return self._redis is not None

    def _key(self, key: str) -> str:
        """Prepend the cache namespace to *key*."""
        return f"{self._prefix}{key}"

    def get(self, key: str) -> Any | None:
        """Fetch and JSON-decode *key*; None on miss, expiry, or any Redis error."""
        if not self._redis:
            return None
        try:
            raw = self._redis.get(self._key(key))
            if raw is None:
                return None
            return json.loads(raw)
        except Exception:
            return None

    def set(self, key: str, value: Any, ttl: float | None = None) -> None:
        """JSON-encode and store *value* with an expiry (SETEX).

        Fix: only ``ttl=None`` falls back to the default; previously a
        falsy explicit ttl (0) was silently replaced by the default.
        """
        if not self._redis:
            return
        try:
            ttl_seconds = int(self._default_ttl if ttl is None else ttl)
            self._redis.setex(self._key(key), ttl_seconds, json.dumps(value))
        except Exception as e:
            logger.warning("Redis set failed", extra={"error": str(e)})

    def delete(self, key: str) -> bool:
        """Delete *key*. True when Redis reports a key was removed."""
        if not self._redis:
            return False
        try:
            return bool(self._redis.delete(self._key(key)))
        except Exception:
            return False

    def clear(self) -> int:
        """Delete all namespaced keys. Returns the number deleted.

        Fix: iterates with SCAN (``scan_iter``) instead of ``KEYS`` —
        KEYS is O(N) and blocks the Redis server, and is discouraged for
        production use in the Redis documentation.
        """
        if not self._redis:
            return 0
        try:
            keys = list(self._redis.scan_iter(match=f"{self._prefix}*"))
            if keys:
                return int(self._redis.delete(*keys))
            return 0
        except Exception:
            return 0

    def stats(self) -> dict[str, Any]:
        """Keyspace info from Redis, or an availability flag when offline."""
        if not self._redis:
            return {"backend": "redis", "available": False}
        try:
            info = self._redis.info("keyspace")
            return {"backend": "redis", "available": True, "info": info}
        except Exception:
            return {"backend": "redis", "available": False}
|
||||
|
||||
|
||||
class ResponseCache:
|
||||
"""LRU-like response cache with configurable TTL.
|
||||
"""High-level response cache with pluggable backend.
|
||||
|
||||
For production, replace with Redis-backed cache.
|
||||
Uses MemoryCacheBackend by default. Pass a RedisCacheBackend for
|
||||
production multi-worker deployments.
|
||||
"""
|
||||
|
||||
def __init__(self, max_size: int = 1000, ttl_seconds: float = 300.0) -> None:
|
||||
self._cache: dict[str, tuple[float, Any]] = {}
|
||||
self._max_size = max_size
|
||||
def __init__(
|
||||
self,
|
||||
backend: CacheBackend | None = None,
|
||||
max_size: int = 1000,
|
||||
ttl_seconds: float = 300.0,
|
||||
) -> None:
|
||||
self._backend = backend or MemoryCacheBackend(max_size=max_size, default_ttl=ttl_seconds)
|
||||
self._ttl = ttl_seconds
|
||||
|
||||
@staticmethod
|
||||
@@ -26,36 +182,22 @@ class ResponseCache:
|
||||
def get(self, prompt: str, session_id: str, tenant_id: str = "default") -> Any | None:
|
||||
"""Get cached response if it exists and hasn't expired."""
|
||||
key = self._make_key(prompt, session_id, tenant_id)
|
||||
entry = self._cache.get(key)
|
||||
if entry is None:
|
||||
return None
|
||||
ts, value = entry
|
||||
if time.time() - ts > self._ttl:
|
||||
del self._cache[key]
|
||||
return None
|
||||
return value
|
||||
return self._backend.get(key)
|
||||
|
||||
def set(self, prompt: str, session_id: str, value: Any, tenant_id: str = "default") -> None:
|
||||
"""Cache a response."""
|
||||
if len(self._cache) >= self._max_size:
|
||||
oldest_key = min(self._cache, key=lambda k: self._cache[k][0])
|
||||
del self._cache[oldest_key]
|
||||
key = self._make_key(prompt, session_id, tenant_id)
|
||||
self._cache[key] = (time.time(), value)
|
||||
self._backend.set(key, value, self._ttl)
|
||||
|
||||
def invalidate(self, prompt: str, session_id: str, tenant_id: str = "default") -> bool:
|
||||
"""Remove a specific cache entry."""
|
||||
key = self._make_key(prompt, session_id, tenant_id)
|
||||
return self._cache.pop(key, None) is not None
|
||||
return self._backend.delete(key)
|
||||
|
||||
def clear(self) -> int:
|
||||
"""Clear all cache entries. Returns count of cleared entries."""
|
||||
count = len(self._cache)
|
||||
self._cache.clear()
|
||||
return count
|
||||
"""Clear all cache entries."""
|
||||
return self._backend.clear()
|
||||
|
||||
def stats(self) -> dict[str, int]:
|
||||
def stats(self) -> dict[str, Any]:
|
||||
"""Return cache statistics."""
|
||||
now = time.time()
|
||||
active = sum(1 for ts, _ in self._cache.values() if now - ts <= self._ttl)
|
||||
return {"total": len(self._cache), "active": active, "max_size": self._max_size}
|
||||
return self._backend.stats()
|
||||
|
||||
68
fusionagi/core/memory_backend.py
Normal file
68
fusionagi/core/memory_backend.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""In-memory state backend for task persistence.
|
||||
|
||||
Useful for testing and development when no database is needed.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from fusionagi.core.persistence import StateBackend
|
||||
from fusionagi.schemas.task import Task, TaskState
|
||||
|
||||
|
||||
class InMemoryStateBackend(StateBackend):
    """Dict-backed StateBackend for tests and local development.

    Nothing survives a process restart; use SQLiteStateBackend (or a
    Postgres-backed implementation) when real persistence is required.
    """

    def __init__(self) -> None:
        self._tasks: dict[str, Task] = {}
        self._traces: dict[str, list[dict[str, Any]]] = {}

    def get_task(self, task_id: str) -> Task | None:
        """Return the stored task, or None when unknown."""
        return self._tasks.get(task_id)

    def set_task(self, task: Task) -> None:
        """Insert or overwrite a task, keyed by its id."""
        self._tasks[task.task_id] = task

    def get_task_state(self, task_id: str) -> TaskState | None:
        """State of the task, or None when the task is unknown."""
        existing = self._tasks.get(task_id)
        return None if existing is None else existing.state

    def set_task_state(self, task_id: str, state: TaskState) -> None:
        """Replace the task's state; missing tasks are silently ignored."""
        existing = self._tasks.get(task_id)
        if existing is not None:
            self._tasks[task_id] = existing.model_copy(update={"state": state})

    def append_trace(self, task_id: str, entry: dict[str, Any]) -> None:
        """Record one trace entry for the task."""
        self._traces.setdefault(task_id, []).append(entry)

    def get_trace(self, task_id: str) -> list[dict[str, Any]]:
        """Return a copy of the task's trace (empty list when none)."""
        return list(self._traces.get(task_id, []))

    def list_tasks(self, state: TaskState | None = None, limit: int = 100) -> list[Task]:
        """Return up to *limit* tasks, optionally restricted to one state."""
        if state is None:
            candidates = list(self._tasks.values())
        else:
            candidates = [t for t in self._tasks.values() if t.state == state]
        return candidates[:limit]

    def delete_task(self, task_id: str) -> bool:
        """Drop a task and its trace; True when the task existed."""
        self._traces.pop(task_id, None)
        return self._tasks.pop(task_id, None) is not None

    def count_tasks(self) -> int:
        """Number of stored tasks."""
        return len(self._tasks)
|
||||
189
fusionagi/core/sqlite_backend.py
Normal file
189
fusionagi/core/sqlite_backend.py
Normal file
@@ -0,0 +1,189 @@
|
||||
"""SQLite-backed state backend for task persistence.
|
||||
|
||||
Uses synchronous sqlite3 wrapped in a thread pool for async compatibility.
|
||||
For production Postgres, swap with asyncpg or SQLAlchemy async.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
import threading
|
||||
from typing import Any
|
||||
|
||||
from fusionagi._logger import logger
|
||||
from fusionagi.core.persistence import StateBackend
|
||||
from fusionagi.schemas.task import Task, TaskState
|
||||
|
||||
|
||||
class SQLiteStateBackend(StateBackend):
    """SQLite-backed implementation of StateBackend.

    Stores tasks, task states, and traces in a local SQLite database.
    Writes are serialized via a threading lock; each operation opens a
    fresh connection because sqlite3 connections are not thread-safe.
    """

    def __init__(self, db_path: str = "fusionagi_state.db") -> None:
        self._db_path = db_path
        self._lock = threading.Lock()
        self._init_schema()

    def _get_conn(self) -> sqlite3.Connection:
        """Get a new connection (sqlite3 connections are not thread-safe)."""
        conn = sqlite3.connect(self._db_path)
        conn.row_factory = sqlite3.Row
        return conn

    def _init_schema(self) -> None:
        """Create tables if they don't exist."""
        conn = self._get_conn()
        try:
            conn.executescript("""
                CREATE TABLE IF NOT EXISTS tasks (
                    task_id TEXT PRIMARY KEY,
                    data TEXT NOT NULL,
                    state TEXT NOT NULL DEFAULT 'pending',
                    created_at TEXT,
                    updated_at TEXT
                );
                CREATE TABLE IF NOT EXISTS traces (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    task_id TEXT NOT NULL,
                    entry TEXT NOT NULL,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (task_id) REFERENCES tasks(task_id)
                );
                CREATE INDEX IF NOT EXISTS idx_traces_task ON traces(task_id);
            """)
            conn.commit()
        finally:
            conn.close()
        logger.info("SQLiteStateBackend initialized", extra={"db_path": self._db_path})

    def get_task(self, task_id: str) -> Task | None:
        """Load task by id (deserialized from the stored JSON blob)."""
        conn = self._get_conn()
        try:
            row = conn.execute("SELECT data FROM tasks WHERE task_id = ?", (task_id,)).fetchone()
            if row is None:
                return None
            return Task.model_validate_json(row["data"])
        finally:
            conn.close()

    def set_task(self, task: Task) -> None:
        """Save or update a task (upsert via INSERT OR REPLACE)."""
        with self._lock:
            conn = self._get_conn()
            try:
                data = task.model_dump_json()
                conn.execute(
                    "INSERT OR REPLACE INTO tasks (task_id, data, state, created_at, updated_at) "
                    "VALUES (?, ?, ?, ?, ?)",
                    (
                        task.task_id,
                        data,
                        task.state.value,
                        task.created_at.isoformat() if task.created_at else None,
                        task.updated_at.isoformat() if task.updated_at else None,
                    ),
                )
                conn.commit()
            finally:
                conn.close()

    def get_task_state(self, task_id: str) -> TaskState | None:
        """Return current task state or None if task unknown."""
        conn = self._get_conn()
        try:
            row = conn.execute("SELECT state FROM tasks WHERE task_id = ?", (task_id,)).fetchone()
            if row is None:
                return None
            return TaskState(row["state"])
        finally:
            conn.close()

    def set_task_state(self, task_id: str, state: TaskState) -> None:
        """Update task state; creates no task if missing.

        Fix: the state/updated_at and data columns were previously written
        with two separate UPDATE statements on the same row; they are now
        a single UPDATE so the row can never be observed half-updated.

        NOTE(review): the SQL ``updated_at`` column is bumped here but the
        ``updated_at`` inside the JSON blob is not refreshed — confirm
        whether readers of the blob rely on it.
        """
        with self._lock:
            conn = self._get_conn()
            try:
                task = self.get_task(task_id)
                if task is not None:
                    # Keep the JSON data blob's state in sync with the column.
                    updated = task.model_copy(update={"state": state})
                    conn.execute(
                        "UPDATE tasks SET state = ?, data = ?, updated_at = CURRENT_TIMESTAMP "
                        "WHERE task_id = ?",
                        (state.value, updated.model_dump_json(), task_id),
                    )
                    conn.commit()
            finally:
                conn.close()

    def append_trace(self, task_id: str, entry: dict[str, Any]) -> None:
        """Append a JSON-serialized trace entry."""
        with self._lock:
            conn = self._get_conn()
            try:
                conn.execute(
                    "INSERT INTO traces (task_id, entry) VALUES (?, ?)",
                    (task_id, json.dumps(entry)),
                )
                conn.commit()
            finally:
                conn.close()

    def get_trace(self, task_id: str) -> list[dict[str, Any]]:
        """Load the task's trace entries in insertion order."""
        conn = self._get_conn()
        try:
            rows = conn.execute(
                "SELECT entry FROM traces WHERE task_id = ? ORDER BY id",
                (task_id,),
            ).fetchall()
            return [json.loads(row["entry"]) for row in rows]
        finally:
            conn.close()

    def list_tasks(self, state: TaskState | None = None, limit: int = 100) -> list[Task]:
        """List tasks (most recently inserted first), optionally filtered by state."""
        conn = self._get_conn()
        try:
            if state is not None:
                rows = conn.execute(
                    "SELECT data FROM tasks WHERE state = ? ORDER BY rowid DESC LIMIT ?",
                    (state.value, limit),
                ).fetchall()
            else:
                rows = conn.execute(
                    "SELECT data FROM tasks ORDER BY rowid DESC LIMIT ?",
                    (limit,),
                ).fetchall()
            return [Task.model_validate_json(row["data"]) for row in rows]
        finally:
            conn.close()

    def delete_task(self, task_id: str) -> bool:
        """Delete a task and its traces. True when a task row was removed."""
        with self._lock:
            conn = self._get_conn()
            try:
                conn.execute("DELETE FROM traces WHERE task_id = ?", (task_id,))
                cursor = conn.execute("DELETE FROM tasks WHERE task_id = ?", (task_id,))
                conn.commit()
                return cursor.rowcount > 0
            finally:
                conn.close()

    def count_tasks(self) -> int:
        """Return total task count."""
        conn = self._get_conn()
        try:
            row = conn.execute("SELECT COUNT(*) as cnt FROM tasks").fetchone()
            return row["cnt"] if row else 0
        finally:
            conn.close()
|
||||
Reference in New Issue
Block a user