diff --git a/.gitignore b/.gitignore index 141701d..ddd09b3 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,9 @@ ts/node_modules/ .env infra/node_modules/ infra/cdk.out/ + +# Aperant data directory +.auto-claude/ + +# Git worktrees +.worktrees/ diff --git a/migrations/012_slo_config.sql b/migrations/012_slo_config.sql new file mode 100644 index 0000000..c68904f --- /dev/null +++ b/migrations/012_slo_config.sql @@ -0,0 +1,29 @@ +-- SLO definitions and alerts (F3: SLO Dashboard) + +CREATE TABLE IF NOT EXISTS slo_definitions ( + id TEXT PRIMARY KEY, + org_id TEXT NOT NULL, + name TEXT NOT NULL, + metric TEXT NOT NULL, -- p50_latency, p95_latency, p99_latency, hit_rate + operator TEXT NOT NULL, -- lt, gt + threshold DOUBLE PRECISION NOT NULL, + window_minutes INTEGER NOT NULL DEFAULT 60, + enabled BOOLEAN NOT NULL DEFAULT TRUE, + alert_channels JSONB DEFAULT '[]'::jsonb, + created_at TIMESTAMPTZ DEFAULT now(), + updated_at TIMESTAMPTZ DEFAULT now() +); + +CREATE TABLE IF NOT EXISTS slo_alerts ( + id BIGSERIAL PRIMARY KEY, + org_id TEXT NOT NULL, + slo_id TEXT NOT NULL REFERENCES slo_definitions(id) ON DELETE CASCADE, + metric_value DOUBLE PRECISION NOT NULL, + threshold DOUBLE PRECISION NOT NULL, + status TEXT NOT NULL, -- firing, resolved + dispatched_to JSONB DEFAULT '[]'::jsonb, + created_at TIMESTAMPTZ DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_slo_alerts_slo_id ON slo_alerts(slo_id); +CREATE INDEX IF NOT EXISTS idx_slo_alerts_created ON slo_alerts(created_at DESC); diff --git a/migrations/013_retrieval_profiles.sql b/migrations/013_retrieval_profiles.sql new file mode 100644 index 0000000..8738bb5 --- /dev/null +++ b/migrations/013_retrieval_profiles.sql @@ -0,0 +1,26 @@ +-- Retrieval profiles (F4: Adaptive Retrieval Profiles) + +CREATE TABLE IF NOT EXISTS retrieval_profiles ( + id TEXT PRIMARY KEY, + org_id TEXT NOT NULL, + name TEXT NOT NULL, + semantic_weight REAL NOT NULL DEFAULT 1.0, + graph_weight REAL NOT NULL DEFAULT 1.0, + recency_bias REAL 
NOT NULL DEFAULT 30.0, + tier_filters TEXT[] DEFAULT NULL, + min_score REAL NOT NULL DEFAULT 0.3, + max_results INT NOT NULL DEFAULT 10, + is_preset BOOLEAN NOT NULL DEFAULT FALSE, + created_at TIMESTAMPTZ DEFAULT now(), + updated_at TIMESTAMPTZ DEFAULT now(), + UNIQUE(org_id, name) +); + +INSERT INTO retrieval_profiles (id, org_id, name, semantic_weight, graph_weight, recency_bias, tier_filters, min_score, max_results, is_preset) +VALUES + ('preset-coding', '__global__', 'coding', 1.0, 0.5, 7.0, ARRAY['short','long'], 0.4, 10, TRUE), + ('preset-incident', '__global__', 'incident-response', 0.8, 1.5, 1.0, NULL, 0.2, 20, TRUE), + ('preset-research', '__global__', 'research', 1.2, 1.0, 90.0, ARRAY['long'], 0.3, 15, TRUE) +ON CONFLICT DO NOTHING; + +ALTER TABLE api_keys ADD COLUMN IF NOT EXISTS default_profile_id TEXT; diff --git a/migrations/014_review_decisions.sql b/migrations/014_review_decisions.sql new file mode 100644 index 0000000..34f8fd7 --- /dev/null +++ b/migrations/014_review_decisions.sql @@ -0,0 +1,13 @@ +-- Review decisions audit trail (F5: Graph Approval Inbox) + +CREATE TABLE IF NOT EXISTS review_decisions ( + id TEXT PRIMARY KEY, + relationship_id TEXT NOT NULL, + action TEXT NOT NULL CHECK (action IN ('approve', 'reject')), + reviewer_id TEXT, + notes TEXT, + decided_at TIMESTAMPTZ DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_review_decisions_rel ON review_decisions(relationship_id); +CREATE INDEX IF NOT EXISTS idx_review_decisions_time ON review_decisions(decided_at DESC); diff --git a/migrations/015_retention_policies.sql b/migrations/015_retention_policies.sql new file mode 100644 index 0000000..2175b18 --- /dev/null +++ b/migrations/015_retention_policies.sql @@ -0,0 +1,41 @@ +-- Retention policies and restore drills (F6: Policy-Based Retention) + +CREATE TABLE IF NOT EXISTS retention_policies ( + id TEXT PRIMARY KEY, + org_id TEXT NOT NULL, + name TEXT NOT NULL, + retention_window JSONB NOT NULL DEFAULT '{"working": 3600, "short": 
604800, "long": null}', + snapshot_schedule TEXT, + encryption_required BOOLEAN DEFAULT FALSE, + max_snapshots INT DEFAULT 50, + is_active BOOLEAN DEFAULT TRUE, + created_at TIMESTAMPTZ DEFAULT now(), + updated_at TIMESTAMPTZ DEFAULT now(), + UNIQUE(org_id, name) +); + +CREATE TABLE IF NOT EXISTS snapshot_metadata ( + id TEXT PRIMARY KEY, + org_id TEXT NOT NULL, + policy_id TEXT REFERENCES retention_policies(id), + name TEXT NOT NULL, + path TEXT NOT NULL, + size_bytes BIGINT, + memory_count INT, + encrypted BOOLEAN DEFAULT FALSE, + created_at TIMESTAMPTZ DEFAULT now() +); + +CREATE TABLE IF NOT EXISTS restore_drill_results ( + id TEXT PRIMARY KEY, + org_id TEXT NOT NULL, + snapshot_id TEXT REFERENCES snapshot_metadata(id), + snapshot_name TEXT NOT NULL, + started_at TIMESTAMPTZ NOT NULL, + completed_at TIMESTAMPTZ, + recovery_time_ms BIGINT, + memories_restored INT, + status TEXT DEFAULT 'running', + error TEXT, + created_at TIMESTAMPTZ DEFAULT now() +); diff --git a/migrations/016_workspaces.sql b/migrations/016_workspaces.sql new file mode 100644 index 0000000..c28842e --- /dev/null +++ b/migrations/016_workspaces.sql @@ -0,0 +1,41 @@ +-- Workspaces and audit log (F7: Multi-Tenant Workspace Isolation) + +CREATE TABLE IF NOT EXISTS workspaces ( + id TEXT PRIMARY KEY, + org_id TEXT NOT NULL, + name TEXT NOT NULL, + slug TEXT NOT NULL, + settings JSONB DEFAULT '{}', + created_at TIMESTAMPTZ DEFAULT now(), + archived_at TIMESTAMPTZ, + UNIQUE(org_id, slug) +); + +CREATE TABLE IF NOT EXISTS workspace_members ( + id TEXT PRIMARY KEY, + workspace_id TEXT NOT NULL REFERENCES workspaces(id), + user_id TEXT, + role TEXT NOT NULL DEFAULT 'writer', + invited_at TIMESTAMPTZ DEFAULT now(), + accepted_at TIMESTAMPTZ +); + +CREATE TABLE IF NOT EXISTS audit_log ( + id BIGSERIAL PRIMARY KEY, + org_id TEXT NOT NULL, + workspace_id TEXT, + actor_id TEXT NOT NULL, + actor_type TEXT NOT NULL, + action TEXT NOT NULL, + resource_type TEXT, + resource_id TEXT, + metadata JSONB DEFAULT 
'{}', + ip_address INET, + created_at TIMESTAMPTZ DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_audit_org_time ON audit_log(org_id, created_at DESC); +CREATE INDEX IF NOT EXISTS idx_audit_workspace ON audit_log(workspace_id, created_at DESC); + +ALTER TABLE memories ADD COLUMN IF NOT EXISTS workspace_id TEXT; +ALTER TABLE api_keys ADD COLUMN IF NOT EXISTS workspace_id TEXT; diff --git a/migrations/017_recommendations.sql b/migrations/017_recommendations.sql new file mode 100644 index 0000000..c1799b2 --- /dev/null +++ b/migrations/017_recommendations.sql @@ -0,0 +1,28 @@ +-- Proactive recommendations (F9: Proactive Memory Recommendations) + +CREATE TABLE IF NOT EXISTS recommendation_feedback ( + id TEXT PRIMARY KEY, + org_id TEXT NOT NULL, + workspace_id TEXT, + memory_id TEXT NOT NULL, + actor_id TEXT NOT NULL, + signal TEXT DEFAULT 'manual', + feedback TEXT NOT NULL, + context_hash TEXT, + created_at TIMESTAMPTZ DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_rec_feedback_memory ON recommendation_feedback(memory_id); +CREATE INDEX IF NOT EXISTS idx_rec_feedback_actor ON recommendation_feedback(actor_id, created_at DESC); + +CREATE TABLE IF NOT EXISTS recommendation_config ( + id TEXT PRIMARY KEY, + workspace_id TEXT, + agent_id TEXT, + aggressiveness REAL DEFAULT 0.5, + enabled BOOLEAN DEFAULT true, + max_suggestions INTEGER DEFAULT 3, + cooldown_minutes INTEGER DEFAULT 15, + updated_at TIMESTAMPTZ DEFAULT now(), + UNIQUE(workspace_id, agent_id) +); diff --git a/src/lore/bootstrap.py b/src/lore/bootstrap.py new file mode 100644 index 0000000..1e6baf1 --- /dev/null +++ b/src/lore/bootstrap.py @@ -0,0 +1,360 @@ +"""Guided bootstrap for Lore — validates prerequisites and sets up the environment.""" + +from __future__ import annotations + +import asyncio +import shutil +import subprocess +import sys +from dataclasses import dataclass +from typing import List, Optional + + +@dataclass +class CheckResult: + """Result of a single bootstrap check.""" + name: 
str + status: str # "ok", "warn", "fail" + message: str + fix_hint: Optional[str] = None + + +class BootstrapRunner: + """Runs all bootstrap checks and optional fixes.""" + + def __init__( + self, + *, + db_url: Optional[str] = None, + fix: bool = False, + skip_docker: bool = False, + skip_server: bool = False, + verbose: bool = False, + ) -> None: + self.db_url = db_url + self.fix = fix + self.skip_docker = skip_docker + self.skip_server = skip_server + self.verbose = verbose + + def run_all(self) -> List[CheckResult]: + """Run all checks in order. Returns list of CheckResults.""" + results: List[CheckResult] = [] + results.append(self.check_python_version()) + results.append(self.check_env_vars()) + if not self.skip_docker: + results.append(self.check_docker()) + results.append(self.check_postgres()) + results.append(self.check_pgvector()) + results.append(self.run_migrations()) + if not self.skip_server: + results.append(self.start_server()) + results.append(self.verify_health()) + return results + + def check_python_version(self) -> CheckResult: + """Check Python >= 3.10.""" + major, minor = sys.version_info[:2] + if (major, minor) >= (3, 10): + return CheckResult( + name="python_version", + status="ok", + message=f"Python {major}.{minor} (>= 3.10)", + ) + return CheckResult( + name="python_version", + status="fail", + message=f"Python {major}.{minor} found, need >= 3.10", + fix_hint="Install Python 3.10+ from https://python.org", + ) + + def check_env_vars(self) -> CheckResult: + """Check DATABASE_URL is set.""" + import os + db_url = self.db_url or os.environ.get("DATABASE_URL") + if db_url: + return CheckResult( + name="env_vars", + status="ok", + message="DATABASE_URL is set", + ) + return CheckResult( + name="env_vars", + status="fail", + message="DATABASE_URL not set", + fix_hint="Set DATABASE_URL=postgresql://user:pass@localhost:5432/lore", + ) + + def check_docker(self) -> CheckResult: + """Check Docker is installed and running.""" + if not 
shutil.which("docker"): + hint = "Install Docker from https://docs.docker.com/get-docker/" + if self.fix: + return CheckResult( + name="docker", + status="fail", + message="Docker not found in PATH", + fix_hint=hint, + ) + return CheckResult( + name="docker", + status="fail", + message="Docker not found in PATH", + fix_hint=hint, + ) + try: + result = subprocess.run( + ["docker", "info"], + capture_output=True, + text=True, + timeout=10, + ) + if result.returncode == 0: + return CheckResult( + name="docker", + status="ok", + message="Docker is installed and running", + ) + return CheckResult( + name="docker", + status="warn", + message="Docker installed but daemon may not be running", + fix_hint="Start Docker daemon: sudo systemctl start docker", + ) + except (subprocess.TimeoutExpired, FileNotFoundError): + return CheckResult( + name="docker", + status="warn", + message="Docker check timed out", + fix_hint="Ensure Docker daemon is running", + ) + + def check_postgres(self) -> CheckResult: + """Check PostgreSQL is reachable.""" + import os + db_url = self.db_url or os.environ.get("DATABASE_URL", "") + if not db_url: + return CheckResult( + name="postgres", + status="fail", + message="Cannot check Postgres — no DATABASE_URL", + fix_hint="Set DATABASE_URL first", + ) + + # Try pg_isready if available + if shutil.which("pg_isready"): + try: + result = subprocess.run( + ["pg_isready", "-d", db_url], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + return CheckResult( + name="postgres", + status="ok", + message="PostgreSQL is accepting connections", + ) + except (subprocess.TimeoutExpired, FileNotFoundError): + pass + + # Try a direct connection via asyncpg + try: + import asyncpg # noqa: F811 + + async def _check(): + conn = await asyncpg.connect(db_url, timeout=5) + await conn.fetchval("SELECT 1") + await conn.close() + + asyncio.run(_check()) + return CheckResult( + name="postgres", + status="ok", + message="PostgreSQL connection 
successful", + ) + except ImportError: + return CheckResult( + name="postgres", + status="warn", + message="asyncpg not installed — cannot verify Postgres", + fix_hint="pip install asyncpg", + ) + except Exception as e: + hint = None + if self.fix: + hint = "Try: docker compose up -d db" + return CheckResult( + name="postgres", + status="fail", + message=f"Cannot connect to PostgreSQL: {e}", + fix_hint=hint or "Ensure PostgreSQL is running and DATABASE_URL is correct", + ) + + def check_pgvector(self) -> CheckResult: + """Check pgvector extension is installed.""" + import os + db_url = self.db_url or os.environ.get("DATABASE_URL", "") + if not db_url: + return CheckResult( + name="pgvector", + status="fail", + message="Cannot check pgvector — no DATABASE_URL", + ) + try: + import asyncpg + + async def _check(): + conn = await asyncpg.connect(db_url, timeout=5) + result = await conn.fetchval( + "SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = 'vector')" + ) + await conn.close() + return result + + has_vector = asyncio.run(_check()) + if has_vector: + return CheckResult( + name="pgvector", + status="ok", + message="pgvector extension is installed", + ) + return CheckResult( + name="pgvector", + status="fail", + message="pgvector extension not found", + fix_hint="Run: CREATE EXTENSION IF NOT EXISTS vector;", + ) + except ImportError: + return CheckResult( + name="pgvector", + status="warn", + message="asyncpg not installed — cannot verify pgvector", + fix_hint="pip install asyncpg", + ) + except Exception as e: + return CheckResult( + name="pgvector", + status="warn", + message=f"pgvector check failed: {e}", + ) + + def run_migrations(self) -> CheckResult: + """Run database migrations.""" + import os + db_url = self.db_url or os.environ.get("DATABASE_URL", "") + if not db_url: + return CheckResult( + name="migrations", + status="fail", + message="Cannot run migrations — no DATABASE_URL", + ) + try: + from lore.server.config import settings + from 
lore.server.db import close_pool, init_pool + from lore.server.db import run_migrations as _run_migrations + + async def _migrate(): + pool = await init_pool(db_url) + await _run_migrations(pool, settings.migrations_dir) + await close_pool() + + asyncio.run(_migrate()) + return CheckResult( + name="migrations", + status="ok", + message="Migrations completed successfully", + ) + except ImportError: + return CheckResult( + name="migrations", + status="warn", + message="Server dependencies not installed", + fix_hint="pip install lore-sdk[server]", + ) + except Exception as e: + return CheckResult( + name="migrations", + status="fail", + message=f"Migration failed: {e}", + ) + + def start_server(self) -> CheckResult: + """Start the Lore server (optional).""" + try: + import uvicorn # noqa: F401 + except ImportError: + return CheckResult( + name="server_start", + status="warn", + message="uvicorn not installed — skipping server start", + fix_hint="pip install lore-sdk[server]", + ) + # Just verify the module is importable, don't actually start + try: + import lore.server.app # noqa: F401 + return CheckResult( + name="server_start", + status="ok", + message="Server module is importable", + ) + except Exception as e: + return CheckResult( + name="server_start", + status="fail", + message=f"Server import failed: {e}", + ) + + def verify_health(self) -> CheckResult: + """Verify the /ready endpoint (if server is running).""" + import os + import urllib.error + import urllib.request + + port = os.environ.get("LORE_PORT", "8765") + url = f"http://localhost:{port}/ready" + try: + with urllib.request.urlopen(url, timeout=3) as resp: + if resp.status == 200: + return CheckResult( + name="health", + status="ok", + message=f"Server healthy at {url}", + ) + return CheckResult( + name="health", + status="warn", + message=f"Server responded with {resp.status}", + ) + except Exception: + return CheckResult( + name="health", + status="warn", + message="Server not reachable (may not be 
running)", + fix_hint=f"Start server: lore serve --port {port}", + ) + + +def format_results(results: List[CheckResult], verbose: bool = False) -> str: + """Format check results for display.""" + lines: List[str] = [] + icons = {"ok": "\u2713", "warn": "\u26a0", "fail": "\u2717"} + for r in results: + icon = icons.get(r.status, "?") + lines.append(f" [{icon}] {r.name}: {r.message}") + if r.fix_hint and (verbose or r.status == "fail"): + lines.append(f" Fix: {r.fix_hint}") + + ok_count = sum(1 for r in results if r.status == "ok") + warn_count = sum(1 for r in results if r.status == "warn") + fail_count = sum(1 for r in results if r.status == "fail") + lines.append("") + lines.append(f" Summary: {ok_count} passed, {warn_count} warnings, {fail_count} failed") + + if fail_count == 0: + lines.append(" Lore is ready!") + else: + lines.append(" Run with --fix to attempt auto-remediation.") + + return "\n".join(lines) diff --git a/src/lore/cli.py b/src/lore/cli.py index 42aec8a..8658a28 100644 --- a/src/lore/cli.py +++ b/src/lore/cli.py @@ -571,6 +571,12 @@ def build_parser() -> argparse.ArgumentParser: p_setup.add_argument("--remove", default=None, metavar="RUNTIME", choices=["claude-code", "openclaw", "cursor", "codex"], help="Remove hooks for a runtime") + p_setup.add_argument("--validate", action="store_true", + help="Validate hook scripts and config files after setup") + p_setup.add_argument("--test-connection", action="store_true", dest="test_connection", + help="Test connectivity to the Lore server") + p_setup.add_argument("--dry-run", action="store_true", dest="setup_dry_run", + help="Show what would be done without making changes") # export p = sub.add_parser("export", help="Export memories and knowledge graph") @@ -631,6 +637,129 @@ def build_parser() -> argparse.ArgumentParser: p_review.add_argument("--reject-all", action="store_true", dest="reject_all", help="Reject all pending") p_review.add_argument("--limit", type=int, default=50, help="Max items to show 
(default: 50)") + # slo + slo_parser = sub.add_parser("slo", help="Manage SLO definitions and alerts") + slo_parser.add_argument("--api-url", default=None, help="Lore API URL (or LORE_API_URL)") + slo_parser.add_argument("--api-key", default=None, help="Lore API key (or LORE_API_KEY)") + slo_sub = slo_parser.add_subparsers(dest="slo_command") + + slo_sub.add_parser("list", help="List SLO definitions") + slo_sub.add_parser("status", help="Show current SLO pass/fail status") + slo_sub.add_parser("alerts", help="Show alert history") + + slo_c = slo_sub.add_parser("create", help="Create an SLO") + slo_c.add_argument("--name", required=True, dest="slo_name") + slo_c.add_argument("--metric", required=True, choices=["p50_latency", "p95_latency", "p99_latency", "hit_rate"]) + slo_c.add_argument("--threshold", required=True, type=float) + slo_c.add_argument("--operator", default="lt", choices=["lt", "gt"]) + slo_c.add_argument("--window", type=int, default=60, dest="window_minutes") + + slo_d = slo_sub.add_parser("delete", help="Delete an SLO") + slo_d.add_argument("slo_id", help="SLO ID to delete") + + slo_t = slo_sub.add_parser("test", help="Fire a test alert") + slo_t.add_argument("slo_id", help="SLO ID to test") + + # profiles + prof_parser = sub.add_parser("profiles", help="Manage retrieval profiles") + prof_parser.add_argument("--api-url", default=None, help="Lore API URL") + prof_parser.add_argument("--api-key", default=None, help="Lore API key") + prof_sub = prof_parser.add_subparsers(dest="prof_command") + + prof_sub.add_parser("list", help="List profiles") + prof_cr = prof_sub.add_parser("create", help="Create a profile") + prof_cr.add_argument("--name", required=True) + prof_cr.add_argument("--semantic-weight", type=float, default=1.0, dest="semantic_weight") + prof_cr.add_argument("--graph-weight", type=float, default=1.0, dest="graph_weight") + prof_cr.add_argument("--recency-bias", type=float, default=30.0, dest="recency_bias") + 
prof_cr.add_argument("--min-score", type=float, default=0.3, dest="min_score") + prof_cr.add_argument("--max-results", type=int, default=10, dest="max_results") + + prof_del = prof_sub.add_parser("delete", help="Delete a profile") + prof_del.add_argument("profile_id") + + # policy + pol_parser = sub.add_parser("policy", help="Manage retention policies") + pol_parser.add_argument("--api-url", default=None, help="Lore API URL") + pol_parser.add_argument("--api-key", default=None, help="Lore API key") + pol_sub = pol_parser.add_subparsers(dest="pol_command") + + pol_sub.add_parser("list", help="List policies") + pol_sub.add_parser("compliance", help="Check policy compliance") + pol_cr = pol_sub.add_parser("create", help="Create a policy") + pol_cr.add_argument("--name", required=True) + pol_cr.add_argument("--snapshot-schedule", default=None, dest="snapshot_schedule") + pol_cr.add_argument("--max-snapshots", type=int, default=50, dest="max_snapshots") + + pol_del = pol_sub.add_parser("delete", help="Delete a policy") + pol_del.add_argument("policy_id") + + # restore-drill + p_drill = sub.add_parser("restore-drill", help="Run a restore drill") + p_drill.add_argument("snapshot_name", nargs="?", default=None, help="Snapshot name to drill") + p_drill.add_argument("--latest", action="store_true", help="Use latest snapshot") + p_drill.add_argument("--api-url", default=None, help="Lore API URL") + p_drill.add_argument("--api-key", default=None, help="Lore API key") + + # workspace + ws_parser = sub.add_parser("workspace", help="Manage workspaces") + ws_parser.add_argument("--api-url", default=None, help="Lore API URL") + ws_parser.add_argument("--api-key", default=None, help="Lore API key") + ws_sub = ws_parser.add_subparsers(dest="ws_command") + + ws_sub.add_parser("list", help="List workspaces") + ws_cr = ws_sub.add_parser("create", help="Create a workspace") + ws_cr.add_argument("name", help="Workspace name") + ws_cr.add_argument("--slug", default=None, help="URL slug 
(auto-generated if omitted)") + + ws_sw = ws_sub.add_parser("switch", help="Switch to a workspace") + ws_sw.add_argument("slug", help="Workspace slug") + + ws_mem = ws_sub.add_parser("members", help="List workspace members") + ws_mem.add_argument("--workspace", default=None, help="Workspace slug") + + # audit + p_audit = sub.add_parser("audit", help="Query audit log") + p_audit.add_argument("--workspace", default=None, help="Filter by workspace slug") + p_audit.add_argument("--since", default=None, help="ISO 8601 start time") + p_audit.add_argument("--limit", type=int, default=50) + p_audit.add_argument("--api-url", default=None, help="Lore API URL") + p_audit.add_argument("--api-key", default=None, help="Lore API key") + + # plugin + plug_parser = sub.add_parser("plugin", help="Manage plugins") + plug_sub = plug_parser.add_subparsers(dest="plug_command") + + plug_sub.add_parser("list", help="List installed plugins") + plug_cr = plug_sub.add_parser("create", help="Scaffold a new plugin project") + plug_cr.add_argument("name", help="Plugin name") + plug_cr.add_argument("--output", default=".", help="Output directory") + + plug_en = plug_sub.add_parser("enable", help="Enable a plugin") + plug_en.add_argument("name") + plug_dis = plug_sub.add_parser("disable", help="Disable a plugin") + plug_dis.add_argument("name") + plug_rel = plug_sub.add_parser("reload", help="Reload a plugin") + plug_rel.add_argument("name") + + # suggest + p_suggest = sub.add_parser("suggest", help="Get proactive memory suggestions") + p_suggest.add_argument("--context", default="", help="Session context text") + p_suggest.add_argument("--feedback", nargs=2, metavar=("MEMORY_ID", "FEEDBACK"), default=None, + help="Submit feedback: positive|negative") + p_suggest.add_argument("--config", action="store_true", dest="show_config", + help="Show recommendation config") + p_suggest.add_argument("--aggressiveness", type=float, default=None, + help="Set aggressiveness (0.0-1.0)") + + # bootstrap + p_boot 
= sub.add_parser("bootstrap", help="Validate prerequisites and set up Lore") + p_boot.add_argument("--fix", action="store_true", help="Attempt to auto-fix missing dependencies") + p_boot.add_argument("--skip-docker", action="store_true", dest="skip_docker", help="Skip Docker check") + p_boot.add_argument("--skip-server", action="store_true", dest="skip_server", help="Skip server start/health check") + p_boot.add_argument("--db-url", default=None, dest="db_url", help="Database URL (or DATABASE_URL env)") + p_boot.add_argument("--verbose", action="store_true", help="Show all fix hints") + # serve p_serve = sub.add_parser("serve", help="Start Lore HTTP server") p_serve.add_argument("--host", default="0.0.0.0", help="Bind address (default: 0.0.0.0)") @@ -1313,7 +1442,16 @@ def cmd_wrap(args: argparse.Namespace) -> None: def cmd_setup(args: argparse.Namespace) -> None: """Handle setup subcommand: install/remove hooks for runtimes.""" - from lore.setup import remove_runtime, setup_claude_code, setup_codex, setup_cursor, setup_openclaw, show_status + from lore.setup import ( + _test_connection, + _validate_hook, + remove_runtime, + setup_claude_code, + setup_codex, + setup_cursor, + setup_openclaw, + show_status, + ) if args.status: show_status() @@ -1323,12 +1461,33 @@ def cmd_setup(args: argparse.Namespace) -> None: remove_runtime(args.remove) return + # Test connection mode (standalone) + if getattr(args, "test_connection", False) and not args.runtime: + server_url = args.server_url or "http://localhost:8765" + api_key = args.api_key + print(f"Testing connection to {server_url}...") + result = _test_connection(server_url, api_key) + print(f" Status: {result['status']}") + print(f" Health: {'ok' if result.get('health') else 'fail'}") + print(f" Retrieve: {'ok' if result.get('retrieve') else 'skip/fail'}") + print(f" Latency: {result.get('latency_ms', 0):.1f}ms") + if result.get("error"): + print(f" Error: {result['error']}") + return + if not args.runtime: print("Usage: 
lore setup [--server-url URL]", file=sys.stderr) print(" lore setup --status", file=sys.stderr) print(" lore setup --remove ", file=sys.stderr) + print(" lore setup --test-connection [--server-url URL]", file=sys.stderr) sys.exit(1) + if getattr(args, "setup_dry_run", False): + print(f"[dry-run] Would set up {args.runtime}") + print(f"[dry-run] Server URL: {args.server_url or 'http://localhost:8765'}") + print(f"[dry-run] API Key: {'set' if args.api_key else 'not set'}") + return + server_url = args.server_url or "http://localhost:8765" api_key = args.api_key @@ -1341,6 +1500,34 @@ def cmd_setup(args: argparse.Namespace) -> None: elif args.runtime == "codex": setup_codex(server_url=server_url, api_key=api_key) + # Post-setup validation + if getattr(args, "validate", False): + print("\nValidating...") + from lore.setup import _claude_hook_path, _codex_hook_path, _cursor_hook_path, _openclaw_hook_path + hook_paths = { + "claude-code": _claude_hook_path, + "cursor": _cursor_hook_path, + "codex": _codex_hook_path, + "openclaw": _openclaw_hook_path, + } + hook_fn = hook_paths.get(args.runtime) + if hook_fn: + errors = _validate_hook(hook_fn()) + if errors: + for err in errors: + print(f" Warning: {err}") + else: + print(" Hook validation: ok") + + # Post-setup connection test + if getattr(args, "test_connection", False): + print("\nTesting connection...") + result = _test_connection(server_url, api_key) + print(f" Status: {result['status']}") + print(f" Latency: {result.get('latency_ms', 0):.1f}ms") + if not result.get("health"): + print(" Warning: Server not reachable. 
Start it with: lore serve") + def cmd_export(args: argparse.Namespace) -> None: lore = _get_lore(args.db) @@ -1629,6 +1816,251 @@ def cmd_review(args: argparse.Namespace) -> None: print("Use --approve-all or --reject-all for bulk actions.") +def cmd_bootstrap(args: argparse.Namespace) -> None: + """Run guided bootstrap checks.""" + from lore.bootstrap import BootstrapRunner, format_results + + runner = BootstrapRunner( + db_url=args.db_url, + fix=args.fix, + skip_docker=args.skip_docker, + skip_server=args.skip_server, + verbose=args.verbose, + ) + print("Lore Bootstrap") + print("=" * 40) + results = runner.run_all() + print(format_results(results, verbose=args.verbose)) + + has_failures = any(r.status == "fail" for r in results) + if has_failures: + sys.exit(1) + + +def cmd_slo(args: argparse.Namespace) -> None: + """Handle SLO subcommands.""" + api_url, api_key = _get_api_config(args) + cmd = getattr(args, "slo_command", None) + if not cmd: + print("Usage: lore slo ", file=sys.stderr) + sys.exit(1) + if cmd == "list": + result = _api_request("GET", f"{api_url}/v1/slo", api_key) + for s in result: + print(f" {s['id']} {s['name']} {s['metric']} {s['operator']} {s['threshold']} {'enabled' if s.get('enabled') else 'disabled'}") + elif cmd == "status": + result = _api_request("GET", f"{api_url}/v1/slo/status", api_key) + for s in result: + icon = "PASS" if s.get("passing") else "FAIL" + val = s.get("current_value") + val_str = f"{val:.2f}" if val is not None else "N/A" + print(f" [{icon}] {s['name']}: {val_str} ({s['operator']} {s['threshold']})") + elif cmd == "alerts": + result = _api_request("GET", f"{api_url}/v1/slo/alerts?limit=20", api_key) + for a in result: + print(f" [{a['status']}] {a['metric_value']:.2f} / {a['threshold']:.2f} ({a.get('created_at', '')})") + elif cmd == "create": + payload = { + "name": args.slo_name, "metric": args.metric, + "threshold": args.threshold, "operator": args.operator, + "window_minutes": args.window_minutes, + } + result = 
def cmd_profiles(args: argparse.Namespace) -> None:
    """Route `lore profiles <list|create|delete>` to the REST API."""
    api_url, api_key = _get_api_config(args)
    sub = getattr(args, "prof_command", None)
    if not sub:
        print("Usage: lore profiles ", file=sys.stderr)
        sys.exit(1)
    if sub == "list":
        for p in _api_request("GET", f"{api_url}/v1/profiles", api_key):
            tag = " [preset]" if p.get("is_preset") else ""
            # NOTE(review): exact column padding was lost in transit — confirm spacing.
            print(f"  {p['id']}  {p['name']}{tag}  sw={p['semantic_weight']} gw={p['graph_weight']} rb={p['recency_bias']}")
    elif sub == "create":
        payload = {
            "name": args.name,
            "semantic_weight": args.semantic_weight,
            "graph_weight": args.graph_weight,
            "recency_bias": args.recency_bias,
            "min_score": args.min_score,
            "max_results": args.max_results,
        }
        created = _api_request("POST", f"{api_url}/v1/profiles", api_key, payload)
        print(f"Created profile: {created['id']} ({created['name']})")
    elif sub == "delete":
        _api_request("DELETE", f"{api_url}/v1/profiles/{args.profile_id}", api_key)
        print(f"Deleted profile: {args.profile_id}")


def cmd_policy(args: argparse.Namespace) -> None:
    """Route `lore policy <list|compliance|create|delete>` to the REST API."""
    api_url, api_key = _get_api_config(args)
    sub = getattr(args, "pol_command", None)
    if not sub:
        print("Usage: lore policy ", file=sys.stderr)
        sys.exit(1)
    if sub == "list":
        for p in _api_request("GET", f"{api_url}/v1/policies", api_key):
            state = "active" if p.get("is_active") else "inactive"
            print(f"  {p['id']}  {p['name']} [{state}] max_snapshots={p.get('max_snapshots', 50)}")
    elif sub == "compliance":
        for c in _api_request("GET", f"{api_url}/v1/policies/compliance", api_key):
            status = "COMPLIANT" if c.get("compliant") else "NON-COMPLIANT"
            print(f"  [{status}] {c['policy_name']}")
            for issue in c.get("issues", []):
                print(f"    - {issue}")
    elif sub == "create":
        payload = {
            "name": args.name,
            "snapshot_schedule": args.snapshot_schedule,
            "max_snapshots": args.max_snapshots,
        }
        created = _api_request("POST", f"{api_url}/v1/policies", api_key, payload)
        print(f"Created policy: {created['id']} ({created['name']})")
    elif sub == "delete":
        _api_request("DELETE", f"{api_url}/v1/policies/{args.policy_id}", api_key)
        print(f"Deleted policy: {args.policy_id}")


def cmd_workspace(args: argparse.Namespace) -> None:
    """Route `lore workspace <list|create|switch|members>` to the REST API."""
    api_url, api_key = _get_api_config(args)
    sub = getattr(args, "ws_command", None)
    if not sub:
        print("Usage: lore workspace ", file=sys.stderr)
        sys.exit(1)
    if sub == "list":
        for w in _api_request("GET", f"{api_url}/v1/workspaces", api_key):
            print(f"  {w['slug']:<20} {w['name']}")
    elif sub == "create":
        slug = args.slug or args.name.lower().replace(" ", "-")
        created = _api_request("POST", f"{api_url}/v1/workspaces", api_key,
                               {"name": args.name, "slug": slug})
        print(f"Created workspace: {created['slug']}")
    elif sub == "switch":
        # No server-side state changes here — switching is purely an env hint.
        print(f"Switched to workspace: {args.slug}")
        print(f"Set LORE_WORKSPACE={args.slug} in your environment to persist.")
    elif sub == "members":
        ws = args.workspace or "default"
        for m in _api_request("GET", f"{api_url}/v1/workspaces/{ws}/members", api_key):
            print(f"  {m.get('user_id', 'unknown'):<20} {m['role']}")


def cmd_audit(args: argparse.Namespace) -> None:
    """Query the audit log through the REST API and print one line per entry."""
    api_url, api_key = _get_api_config(args)
    params = f"?limit={args.limit}"
    if args.workspace:
        params += f"&workspace_id={args.workspace}"
    if args.since:
        params += f"&since={args.since}"
    for entry in _api_request("GET", f"{api_url}/v1/audit{params}", api_key):
        ts = entry.get("created_at", "")[:19]  # trim to seconds precision
        print(f"  [{ts}] {entry['action']} by {entry['actor_id']} ({entry['actor_type']})")


def cmd_plugin(args: argparse.Namespace) -> None:
    """Route `lore plugin <list|create|enable|disable|reload>` locally."""
    sub = getattr(args, "plug_command", None)
    if not sub:
        print("Usage: lore plugin ", file=sys.stderr)
        sys.exit(1)

    if sub == "create":
        from lore.plugin.scaffold import scaffold_plugin

        project_dir = scaffold_plugin(args.name, output_dir=args.output)
        print(f"Plugin scaffolded: {project_dir}")
        print(f"  Install with: cd {project_dir} && pip install -e .")
        return

    # Every remaining subcommand needs a loaded registry.
    from lore.plugin.registry import PluginRegistry

    registry = PluginRegistry()
    registry.load_all()

    if sub == "list":
        plugins = registry.list_plugins()
        if not plugins:
            print("No plugins installed.")
            return
        for p in plugins:
            state = "enabled" if p["enabled"] else "disabled"
            print(f"  {p['name']:<20} v{p['version']} [{state}] {p.get('description', '')}")
        return

    # enable / disable / reload share identical found / not-found handling.
    actions = {
        "enable": ("Enabled", registry.enable),
        "disable": ("Disabled", registry.disable),
        "reload": ("Reloaded", registry.reload),
    }
    if sub in actions:
        verb, action = actions[sub]
        if action(args.name):
            print(f"{verb}: {args.name}")
        else:
            print(f"Plugin not found: {args.name}", file=sys.stderr)
            sys.exit(1)


def cmd_suggest(args: argparse.Namespace) -> None:
    """Get proactive memory suggestions, or record feedback on one."""
    if args.feedback:
        memory_id, feedback = args.feedback
        lore = _get_lore(args.db)
        from lore.recommend.feedback import FeedbackRecorder

        # NOTE(review): this recorder has no store, so the adjustment is
        # not persisted beyond this process — confirm intended.
        FeedbackRecorder().record(memory_id, feedback, "cli-user")
        lore.close()
        print(f"Feedback recorded: {feedback} for {memory_id}")
        return

    if args.show_config:
        print("Recommendation config:")
        print(f"  Aggressiveness: {args.aggressiveness or 0.5}")
        return

    lore = _get_lore(args.db)
    from lore.recommend.engine import RecommendationEngine

    engine = RecommendationEngine(
        store=lore._store,
        embedder=lore._embedder,
        aggressiveness=args.aggressiveness or 0.5,
    )
    recs = engine.suggest(context=args.context)
    lore.close()

    if not recs:
        print("No suggestions at this time.")
        return
    for i, rec in enumerate(recs, 1):
        print(f"  {i}. [{rec.score:.2f}] {rec.content_preview}")
        if rec.explanation:
            print(f"     {rec.explanation}")
        print(f"     ID: {rec.memory_id}")
        print()
2fc03b4..4c4593f 100644 --- a/src/lore/lore.py +++ b/src/lore/lore.py @@ -1888,6 +1888,41 @@ def import_data( ) + # ------------------------------------------------------------------ + # Proactive Recommendations (F9) + # ------------------------------------------------------------------ + + def suggest( + self, + context: str = "", + session_entities: Optional[List[str]] = None, + max_results: int = 3, + ) -> list: + """Get proactive memory suggestions based on context.""" + from lore.recommend.engine import RecommendationEngine + engine = RecommendationEngine( + store=self._store, + embedder=self._embedder, + max_suggestions=max_results, + ) + return engine.suggest( + context=context, + session_entities=session_entities, + limit=max_results, + ) + + def recommendation_feedback( + self, + memory_id: str, + feedback: str, + actor_id: str = "sdk-user", + ) -> None: + """Record feedback on a recommendation.""" + from lore.recommend.feedback import FeedbackRecorder + recorder = FeedbackRecorder() + recorder.record(memory_id, feedback, actor_id) + + def _utc_now_iso() -> str: """Return current UTC time as ISO 8601 string.""" return datetime.now(timezone.utc).isoformat() diff --git a/src/lore/mcp/server.py b/src/lore/mcp/server.py index 25008ed..64d5830 100644 --- a/src/lore/mcp/server.py +++ b/src/lore/mcp/server.py @@ -1510,6 +1510,46 @@ def review_connection( return f"Failed to review connection: {e}" +@mcp.tool( + description=( + "Get proactive memory suggestions based on current session context. " + "USE THIS WHEN: you want to surface potentially relevant memories " + "without a specific query. Useful at session start or before major decisions. " + "Returns memories ranked by multi-signal relevance." 
+ ), +) +def suggest( + context: str = "", + max_results: int = 3, + session_entities: Optional[List[str]] = None, +) -> str: + """Get proactive memory suggestions based on current session context.""" + try: + lore = _get_lore() + from lore.recommend.engine import RecommendationEngine + engine = RecommendationEngine( + store=lore._store, + embedder=lore._embedder, + ) + recs = engine.suggest( + context=context, + session_entities=session_entities, + limit=max_results, + ) + if not recs: + return "No suggestions at this time." + lines = [f"Suggested {len(recs)} memory(ies):\n"] + for i, rec in enumerate(recs, 1): + lines.append(f"{i}. [{rec.score:.2f}] {rec.content_preview}") + if rec.explanation: + lines.append(f" {rec.explanation}") + lines.append(f" ID: {rec.memory_id}") + lines.append("") + return "\n".join(lines) + except Exception as e: + return f"Suggestion failed: {e}" + + def run_server() -> None: """Start the MCP server with stdio transport.""" mcp.run(transport="stdio") diff --git a/src/lore/plugin/__init__.py b/src/lore/plugin/__init__.py new file mode 100644 index 0000000..14f8beb --- /dev/null +++ b/src/lore/plugin/__init__.py @@ -0,0 +1 @@ +"""Lore Plugin SDK — extensibility framework.""" diff --git a/src/lore/plugin/base.py b/src/lore/plugin/base.py new file mode 100644 index 0000000..78df000 --- /dev/null +++ b/src/lore/plugin/base.py @@ -0,0 +1,51 @@ +"""Plugin base class and metadata.""" + +from __future__ import annotations + +from abc import ABC +from dataclasses import dataclass +from typing import Any, Dict, List + + +@dataclass +class PluginMeta: + """Plugin metadata.""" + name: str + version: str + description: str = "" + lore_sdk_version: str = ">=1.0.0" + priority: int = 100 # lower = runs first + + +class LorePlugin(ABC): + """Abstract base class for Lore plugins. + + Subclass and implement any hooks you need. Unimplemented hooks + are no-ops that pass data through unchanged. 
@dataclass
class PluginMeta:
    """Static metadata describing a plugin."""

    name: str
    version: str
    description: str = ""
    lore_sdk_version: str = ">=1.0.0"
    priority: int = 100  # lower = runs first


class LorePlugin(ABC):
    """Abstract base class for Lore plugins.

    Subclass and override any hooks you need.  Every default hook is a
    no-op that returns its input unchanged, so partial implementations
    are safe.
    """

    meta: PluginMeta

    def on_remember(self, memory: Any) -> Any:
        """Called after a memory is saved. Return the (possibly modified) memory."""
        return memory

    def on_recall(self, query: str, results: List[Any]) -> List[Any]:
        """Called after recall results are scored. Return modified results."""
        return results

    def on_enrich(self, memory: Any, enrichment: Dict[str, Any]) -> Dict[str, Any]:
        """Called after enrichment. Return modified enrichment dict."""
        return enrichment

    def on_extract(self, memory: Any, facts: List[Any]) -> List[Any]:
        """Called after fact extraction. Return modified facts."""
        return facts

    def on_score(self, memory: Any, score: float) -> float:
        """Called during scoring. Return modified score."""
        return score

    def cleanup(self) -> None:
        """Called when the plugin is unloaded or the system shuts down."""


class PluginTestHarness:
    """Mock Lore environment for exercising a plugin in isolation."""

    def __init__(self, plugin: LorePlugin) -> None:
        self.plugin = plugin
        self.store = MemoryStore()
        self.memories: List[Memory] = []

    def add_test_memory(self, content: str, **kwargs) -> Memory:
        """Create a throwaway Memory, persist it, and track it for hook runs."""
        from datetime import datetime, timezone

        from ulid import ULID

        memory = Memory(
            id=str(ULID()),
            content=content,
            created_at=datetime.now(timezone.utc).isoformat(),
            updated_at=datetime.now(timezone.utc).isoformat(),
            **kwargs,
        )
        self.store.save(memory)
        self.memories.append(memory)
        return memory

    def test_on_remember(self, memory: Memory) -> Memory:
        return self.plugin.on_remember(memory)

    def test_on_recall(self, query: str, results: list) -> list:
        return self.plugin.on_recall(query, results)

    def test_on_score(self, memory: Memory, score: float) -> float:
        return self.plugin.on_score(memory, score)

    def run_all_hooks(self) -> Dict[str, Any]:
        """Run remember/recall/score hooks against the first test memory."""
        if not self.memories:
            return {}
        mem = self.memories[0]
        return {
            "on_remember": self.test_on_remember(mem),
            "on_recall": self.test_on_recall("test query", [mem]),
            "on_score": self.test_on_score(mem, 0.85),
        }
except Exception: + logger.warning("Plugin %s on_enrich failed", plugin.meta.name, exc_info=True) + return enrichment + + +def dispatch_on_extract(plugins: List[Any], memory: Any, facts: List[Any]) -> List[Any]: + """Call on_extract on all active plugins.""" + for plugin in plugins: + try: + facts = _call_with_timeout(plugin.on_extract, memory, facts) + except Exception: + logger.warning("Plugin %s on_extract failed", plugin.meta.name, exc_info=True) + return facts + + +def dispatch_on_score(plugins: List[Any], memory: Any, score: float) -> float: + """Call on_score on all active plugins.""" + for plugin in plugins: + try: + score = _call_with_timeout(plugin.on_score, memory, score) + except Exception: + logger.warning("Plugin %s on_score failed", plugin.meta.name, exc_info=True) + return score + + +def _call_with_timeout(fn, *args, timeout: float = HOOK_TIMEOUT_SECONDS): + """Call a function with a timeout. Returns result or raises TimeoutError.""" + result = [None] + error = [None] + + def _run(): + try: + result[0] = fn(*args) + except Exception as e: + error[0] = e + + thread = threading.Thread(target=_run, daemon=True) + thread.start() + thread.join(timeout=timeout) + + if thread.is_alive(): + raise TimeoutError(f"Plugin hook timed out after {timeout}s") + if error[0]: + raise error[0] + return result[0] diff --git a/src/lore/plugin/registry.py b/src/lore/plugin/registry.py new file mode 100644 index 0000000..66784a2 --- /dev/null +++ b/src/lore/plugin/registry.py @@ -0,0 +1,128 @@ +"""Plugin discovery, registration, and lifecycle management.""" + +from __future__ import annotations + +import importlib +import importlib.metadata +import logging +from typing import Dict, List, Optional + +from lore.plugin.base import LorePlugin, PluginMeta + +logger = logging.getLogger(__name__) + +ENTRY_POINT_GROUP = "lore.plugins" + + +class PluginRegistry: + """Discovers, loads, and manages Lore plugins.""" + + def __init__(self) -> None: + self._plugins: Dict[str, 
LorePlugin] = {} + self._disabled: set = set() + + def discover(self) -> List[PluginMeta]: + """Discover plugins via entry_points.""" + discovered: List[PluginMeta] = [] + try: + eps = importlib.metadata.entry_points() + if hasattr(eps, "select"): + group = eps.select(group=ENTRY_POINT_GROUP) + else: + group = eps.get(ENTRY_POINT_GROUP, []) + + for ep in group: + try: + plugin_cls = ep.load() + if hasattr(plugin_cls, "meta"): + discovered.append(plugin_cls.meta) + except Exception: + logger.warning("Failed to load plugin: %s", ep.name, exc_info=True) + except Exception: + logger.warning("Plugin discovery failed", exc_info=True) + return discovered + + def load_all(self) -> int: + """Load all discovered plugins. Returns count loaded.""" + count = 0 + try: + eps = importlib.metadata.entry_points() + if hasattr(eps, "select"): + group = eps.select(group=ENTRY_POINT_GROUP) + else: + group = eps.get(ENTRY_POINT_GROUP, []) + + for ep in group: + try: + plugin_cls = ep.load() + plugin = plugin_cls() + if hasattr(plugin, "meta") and isinstance(plugin, LorePlugin): + self._plugins[plugin.meta.name] = plugin + count += 1 + logger.info("Loaded plugin: %s v%s", plugin.meta.name, plugin.meta.version) + except Exception: + logger.warning("Failed to load plugin: %s", ep.name, exc_info=True) + except Exception: + logger.warning("Plugin loading failed", exc_info=True) + return count + + def get(self, name: str) -> Optional[LorePlugin]: + """Get a loaded plugin by name.""" + return self._plugins.get(name) + + def list_plugins(self) -> List[Dict]: + """List all loaded plugins with status.""" + return [ + { + "name": p.meta.name, + "version": p.meta.version, + "description": p.meta.description, + "priority": p.meta.priority, + "enabled": p.meta.name not in self._disabled, + } + for p in sorted(self._plugins.values(), key=lambda p: p.meta.priority) + ] + + def enable(self, name: str) -> bool: + if name in self._plugins: + self._disabled.discard(name) + return True + return False + + 
# FIX: the original template declared
#   build-backend = "setuptools.backends._legacy:_Backend"
# which is not a valid setuptools backend — generated plugins could not be
# built or pip-installed.  The canonical PEP 517 backend is
# "setuptools.build_meta".
PYPROJECT_TEMPLATE = '''[build-system]
requires = ["setuptools>=68.0"]
build-backend = "setuptools.build_meta"

[project]
name = "lore-plugin-{name}"
version = "0.1.0"
description = "Lore plugin: {name}"
requires-python = ">=3.10"
dependencies = ["lore-sdk>=1.0.0"]

[project.entry-points."lore.plugins"]
{name} = "{module}.plugin:{class_name}"
'''

PLUGIN_TEMPLATE = '''"""Lore plugin: {name}."""

from lore.plugin.base import LorePlugin, PluginMeta


class {class_name}(LorePlugin):
    """Custom Lore plugin — {name}."""

    meta = PluginMeta(
        name="{name}",
        version="0.1.0",
        description="{name} plugin for Lore",
        priority=100,
    )

    def on_remember(self, memory):
        """Called after a memory is saved."""
        return memory

    def on_recall(self, query, results):
        """Called after recall results are scored."""
        return results
'''

TEST_TEMPLATE = '''"""Tests for {name} plugin."""

from {module}.plugin import {class_name}


def test_plugin_meta():
    plugin = {class_name}()
    assert plugin.meta.name == "{name}"
    assert plugin.meta.version == "0.1.0"


def test_on_remember_passthrough():
    plugin = {class_name}()
    memory = {{"id": "test", "content": "hello"}}
    result = plugin.on_remember(memory)
    assert result == memory


def test_on_recall_passthrough():
    plugin = {class_name}()
    results = [{{"memory": "test", "score": 0.9}}]
    out = plugin.on_recall("query", results)
    assert out == results
'''


def scaffold_plugin(name: str, output_dir: str = ".") -> Path:
    """Generate a new plugin project from templates.

    Creates ``lore-plugin-<slug>/`` under *output_dir* containing a
    pyproject.toml with a lore.plugins entry point, a plugin module, and a
    starter test file.  Returns the project directory path.
    """
    # Normalize: "My Plugin" -> slug "my-plugin", module "my_plugin",
    # class "MyPluginPlugin".
    slug = name.lower().replace(" ", "-").replace("_", "-")
    module = slug.replace("-", "_")
    class_name = "".join(w.capitalize() for w in slug.split("-")) + "Plugin"

    project_dir = Path(output_dir) / f"lore-plugin-{slug}"
    src_dir = project_dir / module
    tests_dir = project_dir / "tests"

    # Create directories (idempotent so re-running is safe).
    src_dir.mkdir(parents=True, exist_ok=True)
    tests_dir.mkdir(parents=True, exist_ok=True)

    # Write files.
    (project_dir / "pyproject.toml").write_text(
        PYPROJECT_TEMPLATE.format(name=slug, module=module, class_name=class_name)
    )
    (src_dir / "__init__.py").write_text("")
    (src_dir / "plugin.py").write_text(
        PLUGIN_TEMPLATE.format(name=slug, class_name=class_name)
    )
    (tests_dir / "__init__.py").write_text("")
    (tests_dir / f"test_{module}.py").write_text(
        TEST_TEMPLATE.format(name=slug, module=module, class_name=class_name)
    )

    return project_dir
# Default signal weights for the fused relevance score.
DEFAULT_WEIGHTS = {
    "context_similarity": 0.4,
    "entity_overlap": 0.25,
    "temporal_pattern": 0.1,
    "access_pattern": 0.15,
    # NOTE(review): graph_proximity is weighted but no graph signal is
    # computed in suggest() yet — confirm whether this is pending work.
    "graph_proximity": 0.1,
}


class RecommendationEngine:
    """Generate proactive memory recommendations via weighted signal fusion.

    Each candidate memory is scored by several independent signals
    (context similarity, entity overlap, temporal pattern, access pattern);
    the weighted sum is thresholded by `aggressiveness` and the top
    suggestions are returned.
    """

    def __init__(
        self,
        store: Any,
        embedder: Any,
        weights: Optional[Dict[str, float]] = None,
        aggressiveness: float = 0.5,
        max_suggestions: int = 3,
    ) -> None:
        self.store = store
        self.embedder = embedder
        self.weights = weights or dict(DEFAULT_WEIGHTS)
        self.aggressiveness = aggressiveness  # higher = more (weaker) suggestions
        self.max_suggestions = max_suggestions

    def suggest(
        self,
        context: str = "",
        session_entities: Optional[List[str]] = None,
        exclude_ids: Optional[set] = None,
        limit: Optional[int] = None,
    ) -> List[Recommendation]:
        """Generate up to *limit* recommendations for the current session.

        Returns an empty list when no embedded candidate memory clears the
        aggressiveness-derived score threshold.
        """
        from lore.recommend import signals

        limit = limit or self.max_suggestions
        exclude = exclude_ids or set()

        # Embed the session context once, up front.
        context_vec = self.embedder.embed(context) if context else None

        # Candidate pool: recent memories that carry an embedding.
        candidates = self.store.list(limit=500)
        candidates = [m for m in candidates if m.id not in exclude and m.embedding]
        if not candidates:
            return []

        recommendations: List[Recommendation] = []

        for memory in candidates:
            signal_scores: List[RecommendationSignal] = []
            total_score = 0.0

            # Context similarity.
            # FIX: compare with `is not None` — the original truthiness test
            # (`if context_vec and ...`) raises ValueError when the embedder
            # returns a numpy array (ambiguous array truth value).
            if context_vec is not None and memory.embedding:
                score, explanation = signals.context_similarity(context_vec, memory.embedding)
                weight = self.weights.get("context_similarity", 0.4)
                signal_scores.append(RecommendationSignal(
                    name="context_similarity", score=score,
                    weight=weight, explanation=explanation,
                ))
                total_score += score * weight

            # Entity overlap with the live session.
            if session_entities:
                memory_entities = []
                enrichment = (memory.metadata or {}).get("enrichment", {})
                for e in enrichment.get("entities", []):
                    memory_entities.append(e.get("name", ""))
                score, explanation = signals.entity_overlap(session_entities, memory_entities)
                weight = self.weights.get("entity_overlap", 0.25)
                signal_scores.append(RecommendationSignal(
                    name="entity_overlap", score=score,
                    weight=weight, explanation=explanation,
                ))
                total_score += score * weight

            # Time-of-day pattern.
            if memory.created_at:
                score, explanation = signals.temporal_pattern(memory.created_at)
                weight = self.weights.get("temporal_pattern", 0.1)
                signal_scores.append(RecommendationSignal(
                    name="temporal_pattern", score=score,
                    weight=weight, explanation=explanation,
                ))
                total_score += score * weight

            # Access frequency/recency.
            score, explanation = signals.access_pattern(
                memory.access_count, memory.last_accessed_at,
            )
            weight = self.weights.get("access_pattern", 0.15)
            signal_scores.append(RecommendationSignal(
                name="access_pattern", score=score,
                weight=weight, explanation=explanation,
            ))
            total_score += score * weight

            # Aggressiveness gate: higher aggressiveness => lower threshold.
            threshold = 1.0 - self.aggressiveness
            if total_score >= threshold * 0.5:
                preview = memory.content[:150] + "..." if len(memory.content) > 150 else memory.content
                recommendations.append(Recommendation(
                    memory_id=memory.id,
                    content_preview=preview,
                    score=total_score,
                    signals=signal_scores,
                    explanation=self._build_explanation(signal_scores),
                ))

        # Sort by fused score and return the top N.
        recommendations.sort(key=lambda r: r.score, reverse=True)
        return recommendations[:limit]

    def _build_explanation(self, signal_scores: List[RecommendationSignal]) -> str:
        """Summarize the top three contributing signals as one line."""
        top = sorted(signal_scores, key=lambda s: s.score * s.weight, reverse=True)[:3]
        parts = [s.explanation for s in top if s.score > 0]
        return "; ".join(parts) if parts else "Low relevance"
+ + parts = [] + for s in top: + contribution = s.score * s.weight + parts.append(f"{s.explanation} (contribution: {contribution:.2f})") + + return "Suggested because: " + "; ".join(parts) diff --git a/src/lore/recommend/feedback.py b/src/lore/recommend/feedback.py new file mode 100644 index 0000000..aff9a0d --- /dev/null +++ b/src/lore/recommend/feedback.py @@ -0,0 +1,44 @@ +"""Recommendation feedback recording and weight adjustment.""" + +from __future__ import annotations + +import logging +from typing import Any, Dict, Optional + +logger = logging.getLogger(__name__) + + +class FeedbackRecorder: + """Records recommendation feedback and adjusts per-user weights.""" + + def __init__(self, store: Any = None) -> None: + self.store = store + # Per-actor weight adjustments: actor_id -> {signal_name: weight_delta} + self._adjustments: Dict[str, Dict[str, float]] = {} + + def record( + self, + memory_id: str, + feedback: str, + actor_id: str, + signal: str = "manual", + context_hash: Optional[str] = None, + ) -> None: + """Record feedback and adjust weights.""" + if feedback not in ("positive", "negative"): + raise ValueError(f"Invalid feedback: {feedback}") + + # Adjust weights based on feedback + delta = 0.05 if feedback == "positive" else -0.05 + actor_weights = self._adjustments.setdefault(actor_id, {}) + current = actor_weights.get(signal, 0.0) + actor_weights[signal] = max(-0.5, min(0.5, current + delta)) + + logger.debug( + "Feedback recorded: %s for memory %s by %s (signal: %s)", + feedback, memory_id, actor_id, signal, + ) + + def get_weight_adjustment(self, actor_id: str, signal_name: str) -> float: + """Get the cumulative weight adjustment for an actor+signal.""" + return self._adjustments.get(actor_id, {}).get(signal_name, 0.0) diff --git a/src/lore/recommend/signals.py b/src/lore/recommend/signals.py new file mode 100644 index 0000000..c476ee8 --- /dev/null +++ b/src/lore/recommend/signals.py @@ -0,0 +1,85 @@ +"""Signal extractors for proactive 
def context_similarity(
    session_embedding: List[float],
    memory_embedding: bytes,
) -> Tuple[float, str]:
    """Cosine similarity between the session vector and a memory's embedding.

    *memory_embedding* is a packed little-endian float32 buffer.  Returns
    (score clamped to >= 0, human-readable explanation).
    """
    import struct

    import numpy as np

    count = len(memory_embedding) // 4  # 4 bytes per float32
    mem_vec = np.array(struct.unpack(f"{count}f", memory_embedding), dtype=np.float32)
    sess_vec = np.array(session_embedding, dtype=np.float32)

    norm_m = np.linalg.norm(mem_vec)
    norm_s = np.linalg.norm(sess_vec)
    if norm_m < 1e-9 or norm_s < 1e-9:
        # Zero vector on either side: cosine is undefined.
        return 0.0, "No meaningful similarity"

    sim = float(np.dot(mem_vec, sess_vec) / (norm_m * norm_s))
    return max(0, sim), f"Context similarity: {sim:.2f}"


def entity_overlap(
    session_entities: List[str],
    memory_entities: List[str],
) -> Tuple[float, str]:
    """Fraction of session entities (case-insensitive) found in the memory."""
    if not session_entities or not memory_entities:
        return 0.0, "No entity overlap"

    shared = {e.lower() for e in session_entities} & {e.lower() for e in memory_entities}
    if not shared:
        return 0.0, "No entity overlap"

    score = len(shared) / len({e.lower() for e in session_entities})
    names = ", ".join(list(shared)[:3])
    return score, f"Shared entities: {names}"


def temporal_pattern(
    memory_created_at: str,
    current_hour: Optional[int] = None,
) -> Tuple[float, str]:
    """Weak signal favoring memories created at a similar time of day (UTC)."""
    if current_hour is None:
        current_hour = datetime.now(timezone.utc).hour

    try:
        created = datetime.fromisoformat(memory_created_at)
        if created.tzinfo is None:
            created = created.replace(tzinfo=timezone.utc)  # assume UTC when naive
        memory_hour = created.hour

        # Circular hour distance (0..12), linearly mapped into [0, 0.3].
        diff = abs(current_hour - memory_hour)
        diff = min(diff, 24 - diff)
        score = max(0, 1.0 - diff / 12.0) * 0.3  # deliberately weak signal
        return score, f"Similar time of day (hour {memory_hour})"
    except (ValueError, TypeError):
        return 0.0, "No temporal pattern"


def access_pattern(
    access_count: int,
    last_accessed_at: Optional[str],
) -> Tuple[float, str]:
    """Score by access frequency, log-damped and capped at 0.5.

    NOTE(review): *last_accessed_at* is accepted but currently unused —
    confirm whether recency weighting is still planned.
    """
    if access_count == 0:
        return 0.0, "Never accessed"

    score = min(1.0, math.log(access_count + 1) / 5.0) * 0.5
    return score, f"Accessed {access_count} times"


@dataclass
class RecommendationSignal:
    """A single signal contributing to a recommendation score."""

    name: str
    score: float
    weight: float
    explanation: str


@dataclass
class Recommendation:
    """A proactive memory recommendation."""

    memory_id: str
    content_preview: str
    score: float
    signals: List[RecommendationSignal] = field(default_factory=list)
    explanation: str = ""


@dataclass
class RecommendationFeedback:
    """User feedback on a recommendation."""

    memory_id: str
    feedback: str  # "positive" or "negative"
    actor_id: str
    context_hash: Optional[str] = None
logger = logging.getLogger(__name__)


class AlertChannel(ABC):
    """Abstract base class for alert delivery channels."""

    @abstractmethod
    async def send(self, alert: Dict[str, Any]) -> bool:
        """Send an alert. Returns True on success."""


class WebhookChannel(AlertChannel):
    """Deliver alerts via HTTP webhook POST."""

    def __init__(self, url: str, headers: Dict[str, str] | None = None) -> None:
        self.url = url
        self.headers = headers or {}

    async def send(self, alert: Dict[str, Any]) -> bool:
        """POST *alert* as JSON; True on any 2xx response.

        Prefers httpx (async); falls back to urllib when httpx is absent.
        FIX: in the original, the urllib fallback ran inside the
        `except ImportError` handler, so network errors there escaped
        send() instead of being logged and returned as False.
        """
        try:
            import httpx
        except ImportError:
            return self._send_blocking(alert)
        try:
            async with httpx.AsyncClient(timeout=10) as client:
                resp = await client.post(self.url, json=alert, headers=self.headers)
            return 200 <= resp.status_code < 300
        except Exception:
            logger.warning("Webhook alert failed for %s", self.url, exc_info=True)
            return False

    def _send_blocking(self, alert: Dict[str, Any]) -> bool:
        """urllib fallback used when httpx is unavailable.

        NOTE(review): this blocks the event loop for up to 10s — consider
        running it in an executor.
        """
        import urllib.request

        try:
            req = urllib.request.Request(
                self.url,
                data=json.dumps(alert).encode(),
                headers={"Content-Type": "application/json", **self.headers},
                method="POST",
            )
            with urllib.request.urlopen(req, timeout=10) as resp:
                return resp.status < 300
        except Exception:
            logger.warning("Webhook alert failed for %s", self.url, exc_info=True)
            return False


class EmailChannel(AlertChannel):
    """Deliver alerts via SMTP email; connection details come from args or env."""

    def __init__(
        self,
        to_addr: str,
        smtp_host: str | None = None,
        smtp_port: int | None = None,
        smtp_user: str | None = None,
        smtp_pass: str | None = None,
        from_addr: str | None = None,
    ) -> None:
        self.to_addr = to_addr
        self.smtp_host = smtp_host or os.environ.get("SMTP_HOST", "")
        self.smtp_port = smtp_port or int(os.environ.get("SMTP_PORT", "587"))
        self.smtp_user = smtp_user or os.environ.get("SMTP_USER", "")
        self.smtp_pass = smtp_pass or os.environ.get("SMTP_PASS", "")
        self.from_addr = from_addr or os.environ.get("SMTP_FROM", self.smtp_user)

    async def send(self, alert: Dict[str, Any]) -> bool:
        """Email a plain-text rendering of *alert*; False when unconfigured or failing."""
        if not self.smtp_host:
            logger.warning("Email alert skipped — SMTP not configured")
            return False

        try:
            import smtplib
            from email.message import EmailMessage

            msg = EmailMessage()
            msg["Subject"] = f"SLO Alert: {alert.get('slo_name', 'Unknown')}"
            msg["From"] = self.from_addr
            msg["To"] = self.to_addr
            msg.set_content(
                f"SLO Alert\n\n"
                f"Name: {alert.get('slo_name')}\n"
                f"Metric: {alert.get('metric')}\n"
                f"Value: {alert.get('value')}\n"
                f"Threshold: {alert.get('threshold')}\n"
                f"Status: {alert.get('status')}\n"
            )

            with smtplib.SMTP(self.smtp_host, self.smtp_port) as server:
                # NOTE(review): STARTTLS/login only when credentials are set —
                # confirm plain unauthenticated SMTP is intended otherwise.
                if self.smtp_user:
                    server.starttls()
                    server.login(self.smtp_user, self.smtp_pass)
                server.send_message(msg)
            return True
        except Exception:
            logger.warning("Email alert failed", exc_info=True)
            return False
lore.server.routes.sharing import rate_router from lore.server.routes.sharing import router as sharing_router +from lore.server.routes.slo import router as slo_router from lore.server.routes.topics import router as topics_router +from lore.server.routes.workspaces import router as workspaces_router setup_logging() logger = logging.getLogger(__name__) @@ -61,7 +70,21 @@ async def lifespan(app: FastAPI) -> AsyncIterator[None]: pool = await init_pool(db_url) await run_migrations(pool, settings.migrations_dir) + + # Start background tasks + import asyncio + + from lore.server.scheduler import policy_scheduler_loop + from lore.server.slo_checker import slo_checker_loop + + slo_check_interval = int(os.environ.get("SLO_CHECK_INTERVAL", "60")) + slo_task = asyncio.create_task(slo_checker_loop(slo_check_interval)) + scheduler_task = asyncio.create_task(policy_scheduler_loop(60)) + yield + + slo_task.cancel() + scheduler_task.cancel() await close_pool() @@ -86,6 +109,14 @@ async def lifespan(app: FastAPI) -> AsyncIterator[None]: app.include_router(graph_router) app.include_router(review_router) app.include_router(topics_router) +app.include_router(setup_validation_router) +app.include_router(slo_router) +app.include_router(profiles_router) +app.include_router(policies_router) +app.include_router(workspaces_router) +app.include_router(audit_router) +app.include_router(plugins_router) +app.include_router(recommendations_router) # ── UI static files ──────────────────────────────────────────────── import importlib.resources as _pkg_resources # noqa: E402 diff --git a/src/lore/server/audit.py b/src/lore/server/audit.py new file mode 100644 index 0000000..b4b2f27 --- /dev/null +++ b/src/lore/server/audit.py @@ -0,0 +1,51 @@ +"""Async audit writer helper.""" + +from __future__ import annotations + +import asyncio +import logging +from typing import Any, Dict, Optional + +logger = logging.getLogger(__name__) + + +async def write_audit_log( + *, + org_id: str, + actor_id: str, + 
actor_type: str, + action: str, + workspace_id: Optional[str] = None, + resource_type: Optional[str] = None, + resource_id: Optional[str] = None, + metadata: Optional[Dict[str, Any]] = None, + ip_address: Optional[str] = None, +) -> None: + """Insert an audit log entry (fire-and-forget safe).""" + import json + + try: + from lore.server.db import get_pool + pool = await get_pool() + async with pool.acquire() as conn: + await conn.execute( + """INSERT INTO audit_log + (org_id, workspace_id, actor_id, actor_type, action, + resource_type, resource_id, metadata, ip_address) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8::jsonb, $9::inet)""", + org_id, workspace_id, actor_id, actor_type, action, + resource_type, resource_id, + json.dumps(metadata or {}), + ip_address, + ) + except Exception: + logger.warning("Failed to write audit log", exc_info=True) + + +def fire_audit_log(**kwargs) -> None: + """Schedule an audit log write as a fire-and-forget task.""" + try: + loop = asyncio.get_running_loop() + loop.create_task(write_audit_log(**kwargs)) + except RuntimeError: + pass diff --git a/src/lore/server/config.py b/src/lore/server/config.py index a7042e0..1a39336 100644 --- a/src/lore/server/config.py +++ b/src/lore/server/config.py @@ -34,6 +34,16 @@ class Settings: log_format: str = "pretty" # "json" or "pretty" log_level: str = "INFO" + # SLO Dashboard (F3) + slo_check_interval_seconds: int = 60 + alert_webhook_url: Optional[str] = None + + # SMTP for email alerts + smtp_host: Optional[str] = None + smtp_port: int = 587 + smtp_user: Optional[str] = None + smtp_from: Optional[str] = None + @classmethod def from_env(cls) -> Settings: # Resolve Docker secrets / AWS Secrets Manager before reading env @@ -55,6 +65,12 @@ def from_env(cls) -> Settings: metrics_enabled=os.environ.get("METRICS_ENABLED", "true").lower() in ("true", "1", "yes"), log_format=os.environ.get("LOG_FORMAT", "pretty"), log_level=os.environ.get("LOG_LEVEL", "INFO").upper(), + 
slo_check_interval_seconds=int(os.environ.get("SLO_CHECK_INTERVAL", "60")), + alert_webhook_url=os.environ.get("ALERT_WEBHOOK_URL"), + smtp_host=os.environ.get("SMTP_HOST"), + smtp_port=int(os.environ.get("SMTP_PORT", "587")), + smtp_user=os.environ.get("SMTP_USER"), + smtp_from=os.environ.get("SMTP_FROM"), ) diff --git a/src/lore/server/routes/audit.py b/src/lore/server/routes/audit.py new file mode 100644 index 0000000..036b380 --- /dev/null +++ b/src/lore/server/routes/audit.py @@ -0,0 +1,98 @@ +"""Audit log endpoints — GET /v1/audit.""" + +from __future__ import annotations + +import logging +from typing import Any, Dict, List, Optional + +try: + from fastapi import APIRouter, Depends, Query +except ImportError: + raise ImportError("FastAPI is required.") + +from pydantic import BaseModel + +from lore.server.auth import AuthContext, get_auth_context +from lore.server.db import get_pool + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/v1/audit", tags=["audit"]) + + +class AuditEntry(BaseModel): + id: int + org_id: str + workspace_id: Optional[str] = None + actor_id: str + actor_type: str + action: str + resource_type: Optional[str] = None + resource_id: Optional[str] = None + metadata: Dict[str, Any] = {} + ip_address: Optional[str] = None + created_at: Optional[str] = None + + +def _ts(val) -> Optional[str]: + if val is None: + return None + from datetime import datetime + if isinstance(val, datetime): + return val.isoformat() + return str(val) + + +@router.get("", response_model=List[AuditEntry]) +async def query_audit_log( + workspace_id: Optional[str] = Query(None), + action: Optional[str] = Query(None), + actor_id: Optional[str] = Query(None), + since: Optional[str] = Query(None, description="ISO 8601 datetime"), + limit: int = Query(50, ge=1, le=500), + auth: AuthContext = Depends(get_auth_context), +) -> List[AuditEntry]: + """Query the audit log with filters.""" + pool = await get_pool() + async with pool.acquire() as conn: + 
params: list = [auth.org_id] + where_parts = ["org_id = $1"] + + if workspace_id: + params.append(workspace_id) + where_parts.append(f"workspace_id = ${len(params)}") + if action: + params.append(action) + where_parts.append(f"action = ${len(params)}") + if actor_id: + params.append(actor_id) + where_parts.append(f"actor_id = ${len(params)}") + if since: + params.append(since) + where_parts.append(f"created_at >= ${len(params)}::timestamptz") + + params.append(limit) + where_sql = " AND ".join(where_parts) + + rows = await conn.fetch( + f"""SELECT * FROM audit_log + WHERE {where_sql} + ORDER BY created_at DESC + LIMIT ${len(params)}""", + *params, + ) + + return [ + AuditEntry( + id=r["id"], org_id=r["org_id"], + workspace_id=r["workspace_id"], + actor_id=r["actor_id"], actor_type=r["actor_type"], + action=r["action"], + resource_type=r["resource_type"], + resource_id=r["resource_id"], + metadata=r["metadata"] or {}, + ip_address=str(r["ip_address"]) if r["ip_address"] else None, + created_at=_ts(r["created_at"]), + ) + for r in rows + ] diff --git a/src/lore/server/routes/plugins.py b/src/lore/server/routes/plugins.py new file mode 100644 index 0000000..caa4a72 --- /dev/null +++ b/src/lore/server/routes/plugins.py @@ -0,0 +1,85 @@ +"""Plugin management API — /v1/plugins.""" + +from __future__ import annotations + +import logging +from typing import Any, Dict, List + +try: + from fastapi import APIRouter, HTTPException +except ImportError: + raise ImportError("FastAPI is required.") + +from pydantic import BaseModel + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/v1/plugins", tags=["plugins"]) + +# Module-level registry (initialized in app lifespan) +_registry = None + + +def set_registry(registry) -> None: + global _registry + _registry = registry + + +def _get_registry(): + if _registry is None: + from lore.plugin.registry import PluginRegistry + return PluginRegistry() + return _registry + + +class PluginInfo(BaseModel): + name: str + 
version: str + description: str = "" + priority: int = 100 + enabled: bool = True + + +@router.get("", response_model=List[PluginInfo]) +async def list_plugins() -> List[PluginInfo]: + registry = _get_registry() + return [PluginInfo(**p) for p in registry.list_plugins()] + + +@router.post("/{name}/enable") +async def enable_plugin(name: str) -> Dict[str, Any]: + registry = _get_registry() + if registry.enable(name): + return {"status": "enabled", "name": name} + raise HTTPException(404, f"Plugin '{name}' not found") + + +@router.post("/{name}/disable") +async def disable_plugin(name: str) -> Dict[str, Any]: + registry = _get_registry() + if registry.disable(name): + return {"status": "disabled", "name": name} + raise HTTPException(404, f"Plugin '{name}' not found") + + +@router.post("/{name}/reload") +async def reload_plugin(name: str) -> Dict[str, Any]: + registry = _get_registry() + if registry.reload(name): + return {"status": "reloaded", "name": name} + raise HTTPException(404, f"Plugin '{name}' not found or reload failed") + + +@router.get("/{name}", response_model=PluginInfo) +async def get_plugin(name: str) -> PluginInfo: + registry = _get_registry() + plugin = registry.get(name) + if not plugin: + raise HTTPException(404, f"Plugin '{name}' not found") + return PluginInfo( + name=plugin.meta.name, + version=plugin.meta.version, + description=plugin.meta.description, + priority=plugin.meta.priority, + enabled=plugin.meta.name not in registry._disabled, + ) diff --git a/src/lore/server/routes/policies.py b/src/lore/server/routes/policies.py new file mode 100644 index 0000000..7b96f30 --- /dev/null +++ b/src/lore/server/routes/policies.py @@ -0,0 +1,376 @@ +"""Retention policies CRUD — /v1/policies endpoints.""" + +from __future__ import annotations + +import json +import logging +from typing import Any, Dict, List, Optional + +try: + from fastapi import APIRouter, Depends, HTTPException, Query +except ImportError: + raise ImportError("FastAPI is required. 
Install with: pip install lore-sdk[server]") + +from pydantic import BaseModel + +from lore.server.auth import AuthContext, get_auth_context, require_role +from lore.server.db import get_pool + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/v1/policies", tags=["policies"]) + + +class PolicyCreateRequest(BaseModel): + name: str + retention_window: Dict[str, Any] = {"working": 3600, "short": 604800, "long": None} + snapshot_schedule: Optional[str] = None + encryption_required: bool = False + max_snapshots: int = 50 + is_active: bool = True + + +class PolicyUpdateRequest(BaseModel): + name: Optional[str] = None + retention_window: Optional[Dict[str, Any]] = None + snapshot_schedule: Optional[str] = None + encryption_required: Optional[bool] = None + max_snapshots: Optional[int] = None + is_active: Optional[bool] = None + + +class PolicyResponse(BaseModel): + id: str + org_id: str + name: str + retention_window: Dict[str, Any] + snapshot_schedule: Optional[str] = None + encryption_required: bool + max_snapshots: int + is_active: bool + created_at: Optional[str] = None + updated_at: Optional[str] = None + + +class DrillResultResponse(BaseModel): + id: str + snapshot_name: str + started_at: Optional[str] = None + completed_at: Optional[str] = None + recovery_time_ms: Optional[int] = None + memories_restored: Optional[int] = None + status: str + error: Optional[str] = None + + +class ComplianceResponse(BaseModel): + policy_id: str + policy_name: str + compliant: bool + issues: List[str] = [] + + +def _ts(val) -> Optional[str]: + if val is None: + return None + from datetime import datetime + if isinstance(val, datetime): + return val.isoformat() + return str(val) + + +@router.get("", response_model=List[PolicyResponse]) +async def list_policies( + auth: AuthContext = Depends(get_auth_context), +) -> List[PolicyResponse]: + """List all retention policies.""" + pool = await get_pool() + async with pool.acquire() as conn: + rows = await conn.fetch( + 
"SELECT * FROM retention_policies WHERE org_id = $1 ORDER BY name", + auth.org_id, + ) + return [ + PolicyResponse( + id=r["id"], org_id=r["org_id"], name=r["name"], + retention_window=r["retention_window"], + snapshot_schedule=r["snapshot_schedule"], + encryption_required=r["encryption_required"], + max_snapshots=r["max_snapshots"], + is_active=r["is_active"], + created_at=_ts(r["created_at"]), + updated_at=_ts(r["updated_at"]), + ) + for r in rows + ] + + +@router.post("", response_model=PolicyResponse, status_code=201) +async def create_policy( + body: PolicyCreateRequest, + auth: AuthContext = Depends(require_role("admin")), +) -> PolicyResponse: + """Create a retention policy.""" + from ulid import ULID + policy_id = str(ULID()) + pool = await get_pool() + async with pool.acquire() as conn: + row = await conn.fetchrow( + """INSERT INTO retention_policies + (id, org_id, name, retention_window, snapshot_schedule, + encryption_required, max_snapshots, is_active) + VALUES ($1, $2, $3, $4::jsonb, $5, $6, $7, $8) + RETURNING *""", + policy_id, auth.org_id, body.name, + json.dumps(body.retention_window), + body.snapshot_schedule, body.encryption_required, + body.max_snapshots, body.is_active, + ) + return PolicyResponse( + id=row["id"], org_id=row["org_id"], name=row["name"], + retention_window=row["retention_window"], + snapshot_schedule=row["snapshot_schedule"], + encryption_required=row["encryption_required"], + max_snapshots=row["max_snapshots"], + is_active=row["is_active"], + created_at=_ts(row["created_at"]), + updated_at=_ts(row["updated_at"]), + ) + + +@router.get("/{policy_id}", response_model=PolicyResponse) +async def get_policy( + policy_id: str, + auth: AuthContext = Depends(get_auth_context), +) -> PolicyResponse: + """Get a policy with compliance info.""" + pool = await get_pool() + async with pool.acquire() as conn: + row = await conn.fetchrow( + "SELECT * FROM retention_policies WHERE id = $1 AND org_id = $2", + policy_id, auth.org_id, + ) + if not 
row: + raise HTTPException(404, "Policy not found") + return PolicyResponse( + id=row["id"], org_id=row["org_id"], name=row["name"], + retention_window=row["retention_window"], + snapshot_schedule=row["snapshot_schedule"], + encryption_required=row["encryption_required"], + max_snapshots=row["max_snapshots"], + is_active=row["is_active"], + created_at=_ts(row["created_at"]), + updated_at=_ts(row["updated_at"]), + ) + + +@router.put("/{policy_id}", response_model=PolicyResponse) +async def update_policy( + policy_id: str, + body: PolicyUpdateRequest, + auth: AuthContext = Depends(require_role("admin")), +) -> PolicyResponse: + """Update a retention policy.""" + pool = await get_pool() + async with pool.acquire() as conn: + existing = await conn.fetchrow( + "SELECT id FROM retention_policies WHERE id = $1 AND org_id = $2", + policy_id, auth.org_id, + ) + if not existing: + raise HTTPException(404, "Policy not found") + + updates = [] + params: list = [policy_id, auth.org_id] + if body.name is not None: + params.append(body.name) + updates.append(f"name = ${len(params)}") + if body.retention_window is not None: + params.append(json.dumps(body.retention_window)) + updates.append(f"retention_window = ${len(params)}::jsonb") + if body.snapshot_schedule is not None: + params.append(body.snapshot_schedule) + updates.append(f"snapshot_schedule = ${len(params)}") + if body.encryption_required is not None: + params.append(body.encryption_required) + updates.append(f"encryption_required = ${len(params)}") + if body.max_snapshots is not None: + params.append(body.max_snapshots) + updates.append(f"max_snapshots = ${len(params)}") + if body.is_active is not None: + params.append(body.is_active) + updates.append(f"is_active = ${len(params)}") + + if not updates: + raise HTTPException(400, "No fields to update") + + updates.append("updated_at = now()") + row = await conn.fetchrow( + f"""UPDATE retention_policies SET {", ".join(updates)} + WHERE id = $1 AND org_id = $2 RETURNING 
*""", + *params, + ) + + return PolicyResponse( + id=row["id"], org_id=row["org_id"], name=row["name"], + retention_window=row["retention_window"], + snapshot_schedule=row["snapshot_schedule"], + encryption_required=row["encryption_required"], + max_snapshots=row["max_snapshots"], + is_active=row["is_active"], + created_at=_ts(row["created_at"]), + updated_at=_ts(row["updated_at"]), + ) + + +@router.delete("/{policy_id}", status_code=204) +async def delete_policy( + policy_id: str, + auth: AuthContext = Depends(require_role("admin")), +) -> None: + """Delete a retention policy.""" + pool = await get_pool() + async with pool.acquire() as conn: + result = await conn.execute( + "DELETE FROM retention_policies WHERE id = $1 AND org_id = $2", + policy_id, auth.org_id, + ) + if result == "DELETE 0": + raise HTTPException(404, "Policy not found") + + +@router.post("/{policy_id}/drill", response_model=DrillResultResponse, status_code=201) +async def run_drill( + policy_id: str, + auth: AuthContext = Depends(require_role("admin")), +) -> DrillResultResponse: + """Execute a restore drill against the latest snapshot.""" + import time + from datetime import datetime, timezone + + from ulid import ULID + + pool = await get_pool() + async with pool.acquire() as conn: + policy = await conn.fetchrow( + "SELECT * FROM retention_policies WHERE id = $1 AND org_id = $2", + policy_id, auth.org_id, + ) + if not policy: + raise HTTPException(404, "Policy not found") + + # Find latest snapshot for this policy + snapshot = await conn.fetchrow( + """SELECT * FROM snapshot_metadata + WHERE policy_id = $1 AND org_id = $2 + ORDER BY created_at DESC LIMIT 1""", + policy_id, auth.org_id, + ) + + drill_id = str(ULID()) + started = datetime.now(timezone.utc) + snapshot_name = snapshot["name"] if snapshot else "none" + snapshot_id = snapshot["id"] if snapshot else None + + start_time = time.monotonic() + + # Simulate restore (in production, this would actually restore) + memories_restored = 
snapshot["memory_count"] if snapshot else 0 + status = "success" if snapshot else "failed" + error = None if snapshot else "No snapshot available" + + elapsed_ms = int((time.monotonic() - start_time) * 1000) + completed = datetime.now(timezone.utc) + + await conn.execute( + """INSERT INTO restore_drill_results + (id, org_id, snapshot_id, snapshot_name, started_at, completed_at, + recovery_time_ms, memories_restored, status, error) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)""", + drill_id, auth.org_id, snapshot_id, snapshot_name, + started, completed, elapsed_ms, memories_restored, + status, error, + ) + + return DrillResultResponse( + id=drill_id, snapshot_name=snapshot_name, + started_at=started.isoformat(), completed_at=completed.isoformat(), + recovery_time_ms=elapsed_ms, memories_restored=memories_restored, + status=status, error=error, + ) + + +@router.get("/{policy_id}/drills", response_model=List[DrillResultResponse]) +async def list_drills( + policy_id: str, + limit: int = Query(20, ge=1, le=100), + auth: AuthContext = Depends(get_auth_context), +) -> List[DrillResultResponse]: + """List drill results for a policy.""" + pool = await get_pool() + async with pool.acquire() as conn: + rows = await conn.fetch( + """SELECT r.* FROM restore_drill_results r + JOIN snapshot_metadata s ON s.id = r.snapshot_id + WHERE s.policy_id = $1 AND r.org_id = $2 + ORDER BY r.created_at DESC LIMIT $3""", + policy_id, auth.org_id, limit, + ) + return [ + DrillResultResponse( + id=r["id"], snapshot_name=r["snapshot_name"], + started_at=_ts(r["started_at"]), completed_at=_ts(r["completed_at"]), + recovery_time_ms=r["recovery_time_ms"], + memories_restored=r["memories_restored"], + status=r["status"], error=r["error"], + ) + for r in rows + ] + + +@router.get("/compliance", response_model=List[ComplianceResponse]) +async def check_compliance( + auth: AuthContext = Depends(get_auth_context), +) -> List[ComplianceResponse]: + """Cross-policy compliance summary.""" + pool = 
await get_pool() + async with pool.acquire() as conn: + policies = await conn.fetch( + "SELECT * FROM retention_policies WHERE org_id = $1 AND is_active = TRUE", + auth.org_id, + ) + + results: List[ComplianceResponse] = [] + for policy in policies: + issues: List[str] = [] + + # Check snapshot count + snapshot_count = await conn.fetchval( + "SELECT COUNT(*) FROM snapshot_metadata WHERE policy_id = $1", + policy["id"], + ) + if snapshot_count > policy["max_snapshots"]: + issues.append( + f"Snapshot count ({snapshot_count}) exceeds max ({policy['max_snapshots']})" + ) + + # Check for recent drill + last_drill = await conn.fetchrow( + """SELECT status, created_at FROM restore_drill_results + WHERE org_id = $1 + ORDER BY created_at DESC LIMIT 1""", + auth.org_id, + ) + if not last_drill: + issues.append("No restore drill has been run") + elif last_drill["status"] == "failed": + issues.append("Last restore drill failed") + + results.append(ComplianceResponse( + policy_id=policy["id"], + policy_name=policy["name"], + compliant=len(issues) == 0, + issues=issues, + )) + + return results diff --git a/src/lore/server/routes/profiles.py b/src/lore/server/routes/profiles.py new file mode 100644 index 0000000..13d2bae --- /dev/null +++ b/src/lore/server/routes/profiles.py @@ -0,0 +1,271 @@ +"""Retrieval profiles CRUD — GET/POST/PUT/DELETE /v1/profiles.""" + +from __future__ import annotations + +import logging +import time as _time +from typing import Any, Dict, List, Optional + +try: + from fastapi import APIRouter, Depends, HTTPException +except ImportError: + raise ImportError("FastAPI is required. 
Install with: pip install lore-sdk[server]") + +from pydantic import BaseModel + +from lore.server.auth import AuthContext, get_auth_context, require_role +from lore.server.db import get_pool + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/v1/profiles", tags=["profiles"]) + + +class ProfileCreateRequest(BaseModel): + name: str + semantic_weight: float = 1.0 + graph_weight: float = 1.0 + recency_bias: float = 30.0 + tier_filters: Optional[List[str]] = None + min_score: float = 0.3 + max_results: int = 10 + + +class ProfileUpdateRequest(BaseModel): + name: Optional[str] = None + semantic_weight: Optional[float] = None + graph_weight: Optional[float] = None + recency_bias: Optional[float] = None + tier_filters: Optional[List[str]] = None + min_score: Optional[float] = None + max_results: Optional[int] = None + + +class ProfileResponse(BaseModel): + id: str + org_id: str + name: str + semantic_weight: float + graph_weight: float + recency_bias: float + tier_filters: Optional[List[str]] = None + min_score: float + max_results: int + is_preset: bool + created_at: Optional[str] = None + updated_at: Optional[str] = None + + +def _ts(val) -> Optional[str]: + if val is None: + return None + from datetime import datetime + if isinstance(val, datetime): + return val.isoformat() + return str(val) + + +def _row_to_response(row) -> ProfileResponse: + return ProfileResponse( + id=row["id"], + org_id=row["org_id"], + name=row["name"], + semantic_weight=float(row["semantic_weight"]), + graph_weight=float(row["graph_weight"]), + recency_bias=float(row["recency_bias"]), + tier_filters=list(row["tier_filters"]) if row["tier_filters"] else None, + min_score=float(row["min_score"]), + max_results=row["max_results"], + is_preset=row["is_preset"], + created_at=_ts(row["created_at"]), + updated_at=_ts(row["updated_at"]), + ) + + +# In-memory cache for profiles (60s TTL) +_profile_cache: Dict[str, tuple] = {} # key -> (profile_dict, timestamp) +_PROFILE_CACHE_TTL = 
60.0 + + +def _get_cached_profile(key: str) -> Optional[Dict[str, Any]]: + cached = _profile_cache.get(key) + if cached and _time.monotonic() - cached[1] < _PROFILE_CACHE_TTL: + return cached[0] + return None + + +def _set_cached_profile(key: str, profile: Dict[str, Any]) -> None: + _profile_cache[key] = (profile, _time.monotonic()) + + +@router.get("", response_model=List[ProfileResponse]) +async def list_profiles( + auth: AuthContext = Depends(get_auth_context), +) -> List[ProfileResponse]: + """List profiles (org + global presets).""" + pool = await get_pool() + async with pool.acquire() as conn: + rows = await conn.fetch( + """SELECT * FROM retrieval_profiles + WHERE org_id = $1 OR org_id = '__global__' + ORDER BY is_preset DESC, name""", + auth.org_id, + ) + return [_row_to_response(r) for r in rows] + + +@router.get("/{profile_id}", response_model=ProfileResponse) +async def get_profile( + profile_id: str, + auth: AuthContext = Depends(get_auth_context), +) -> ProfileResponse: + """Get a specific profile.""" + pool = await get_pool() + async with pool.acquire() as conn: + row = await conn.fetchrow( + """SELECT * FROM retrieval_profiles + WHERE id = $1 AND (org_id = $2 OR org_id = '__global__')""", + profile_id, auth.org_id, + ) + if not row: + raise HTTPException(404, "Profile not found") + return _row_to_response(row) + + +@router.post("", response_model=ProfileResponse, status_code=201) +async def create_profile( + body: ProfileCreateRequest, + auth: AuthContext = Depends(require_role("admin")), +) -> ProfileResponse: + """Create a custom profile.""" + from ulid import ULID + profile_id = str(ULID()) + pool = await get_pool() + async with pool.acquire() as conn: + try: + row = await conn.fetchrow( + """INSERT INTO retrieval_profiles + (id, org_id, name, semantic_weight, graph_weight, recency_bias, + tier_filters, min_score, max_results, is_preset) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, FALSE) + RETURNING *""", + profile_id, auth.org_id, body.name, + 
body.semantic_weight, body.graph_weight, body.recency_bias, + body.tier_filters, body.min_score, body.max_results, + ) + except Exception as e: + if "unique" in str(e).lower(): + raise HTTPException(409, f"Profile '{body.name}' already exists") + raise + return _row_to_response(row) + + +@router.put("/{profile_id}", response_model=ProfileResponse) +async def update_profile( + profile_id: str, + body: ProfileUpdateRequest, + auth: AuthContext = Depends(require_role("admin")), +) -> ProfileResponse: + """Update a profile (not presets).""" + pool = await get_pool() + async with pool.acquire() as conn: + existing = await conn.fetchrow( + "SELECT is_preset FROM retrieval_profiles WHERE id = $1 AND org_id = $2", + profile_id, auth.org_id, + ) + if not existing: + raise HTTPException(404, "Profile not found") + if existing["is_preset"]: + raise HTTPException(403, "Cannot modify preset profiles") + + updates = [] + params: list = [profile_id, auth.org_id] + if body.name is not None: + params.append(body.name) + updates.append(f"name = ${len(params)}") + if body.semantic_weight is not None: + params.append(body.semantic_weight) + updates.append(f"semantic_weight = ${len(params)}") + if body.graph_weight is not None: + params.append(body.graph_weight) + updates.append(f"graph_weight = ${len(params)}") + if body.recency_bias is not None: + params.append(body.recency_bias) + updates.append(f"recency_bias = ${len(params)}") + if body.tier_filters is not None: + params.append(body.tier_filters) + updates.append(f"tier_filters = ${len(params)}") + if body.min_score is not None: + params.append(body.min_score) + updates.append(f"min_score = ${len(params)}") + if body.max_results is not None: + params.append(body.max_results) + updates.append(f"max_results = ${len(params)}") + + if not updates: + raise HTTPException(400, "No fields to update") + + updates.append("updated_at = now()") + set_clause = ", ".join(updates) + + row = await conn.fetchrow( + f"""UPDATE retrieval_profiles 
SET {set_clause} + WHERE id = $1 AND org_id = $2 + RETURNING *""", + *params, + ) + + # Invalidate cache + _profile_cache.pop(f"{auth.org_id}:{row['name']}", None) + return _row_to_response(row) + + +@router.delete("/{profile_id}", status_code=204) +async def delete_profile( + profile_id: str, + auth: AuthContext = Depends(require_role("admin")), +) -> None: + """Delete a profile (not presets).""" + pool = await get_pool() + async with pool.acquire() as conn: + existing = await conn.fetchrow( + "SELECT is_preset FROM retrieval_profiles WHERE id = $1", + profile_id, + ) + if not existing: + raise HTTPException(404, "Profile not found") + if existing["is_preset"]: + raise HTTPException(403, "Cannot delete preset profiles") + + await conn.execute( + "DELETE FROM retrieval_profiles WHERE id = $1 AND org_id = $2", + profile_id, auth.org_id, + ) + + +async def resolve_profile( + conn, org_id: str, profile_name: Optional[str], key_default: Optional[str], +) -> Optional[Dict[str, Any]]: + """Resolve a profile by name: explicit param > key default > None.""" + name = profile_name or key_default + if not name: + return None + + # Check cache + cache_key = f"{org_id}:{name}" + cached = _get_cached_profile(cache_key) + if cached: + return cached + + row = await conn.fetchrow( + """SELECT * FROM retrieval_profiles + WHERE name = $1 AND (org_id = $2 OR org_id = '__global__') + ORDER BY CASE WHEN org_id = $2 THEN 0 ELSE 1 END + LIMIT 1""", + name, org_id, + ) + if row: + profile = dict(row) + _set_cached_profile(cache_key, profile) + return profile + return None diff --git a/src/lore/server/routes/recommendations.py b/src/lore/server/routes/recommendations.py new file mode 100644 index 0000000..f8f5286 --- /dev/null +++ b/src/lore/server/routes/recommendations.py @@ -0,0 +1,168 @@ +"""Recommendation endpoints — /v1/recommendations.""" + +from __future__ import annotations + +import logging +from typing import Dict, List, Optional + +try: + from fastapi import APIRouter, Depends, 
HTTPException, Query +except ImportError: + raise ImportError("FastAPI is required.") + +from pydantic import BaseModel + +from lore.server.auth import AuthContext, get_auth_context +from lore.server.db import get_pool + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/v1/recommendations", tags=["recommendations"]) + + +class RecommendationResponse(BaseModel): + memory_id: str + content_preview: str + score: float + explanation: str + + +class RecommendationRequest(BaseModel): + context: str = "" + session_entities: List[str] = [] + max_results: int = 3 + + +class FeedbackRequest(BaseModel): + feedback: str # "positive" or "negative" + + +class ConfigResponse(BaseModel): + aggressiveness: float = 0.5 + enabled: bool = True + max_suggestions: int = 3 + cooldown_minutes: int = 15 + + +class ConfigUpdateRequest(BaseModel): + aggressiveness: Optional[float] = None + enabled: Optional[bool] = None + max_suggestions: Optional[int] = None + cooldown_minutes: Optional[int] = None + + +@router.get("", response_model=List[RecommendationResponse]) +async def get_recommendations( + context: str = Query("", description="Session context text"), + max_results: int = Query(3, ge=1, le=10), + auth: AuthContext = Depends(get_auth_context), +) -> List[RecommendationResponse]: + """Get proactive memory suggestions.""" + # Placeholder — in production would use RecommendationEngine + return [] + + +@router.post("", response_model=List[RecommendationResponse]) +async def post_recommendations( + body: RecommendationRequest, + auth: AuthContext = Depends(get_auth_context), +) -> List[RecommendationResponse]: + """Get suggestions with explicit context body.""" + return [] + + +@router.post("/{memory_id}/feedback") +async def submit_feedback( + memory_id: str, + body: FeedbackRequest, + auth: AuthContext = Depends(get_auth_context), +) -> Dict[str, str]: + """Submit feedback on a recommendation.""" + from ulid import ULID + + if body.feedback not in ("positive", 
"negative"): + raise HTTPException(400, "Feedback must be 'positive' or 'negative'") + + pool = await get_pool() + async with pool.acquire() as conn: + await conn.execute( + """INSERT INTO recommendation_feedback + (id, org_id, memory_id, actor_id, feedback) + VALUES ($1, $2, $3, $4, $5)""", + str(ULID()), auth.org_id, memory_id, auth.key_id, + body.feedback, + ) + + return {"status": "recorded", "memory_id": memory_id, "feedback": body.feedback} + + +@router.get("/config", response_model=ConfigResponse) +async def get_config( + auth: AuthContext = Depends(get_auth_context), +) -> ConfigResponse: + """Get recommendation config.""" + pool = await get_pool() + async with pool.acquire() as conn: + row = await conn.fetchrow( + """SELECT * FROM recommendation_config + WHERE workspace_id IS NULL AND agent_id IS NULL + LIMIT 1""", + ) + if row: + return ConfigResponse( + aggressiveness=float(row["aggressiveness"]), + enabled=row["enabled"], + max_suggestions=row["max_suggestions"], + cooldown_minutes=row["cooldown_minutes"], + ) + return ConfigResponse() + + +@router.patch("/config", response_model=ConfigResponse) +async def update_config( + body: ConfigUpdateRequest, + auth: AuthContext = Depends(get_auth_context), +) -> ConfigResponse: + """Update recommendation config.""" + from ulid import ULID + + pool = await get_pool() + async with pool.acquire() as conn: + existing = await conn.fetchrow( + "SELECT id FROM recommendation_config WHERE workspace_id IS NULL AND agent_id IS NULL" + ) + + if existing: + updates = [] + params: list = [existing["id"]] + if body.aggressiveness is not None: + params.append(body.aggressiveness) + updates.append(f"aggressiveness = ${len(params)}") + if body.enabled is not None: + params.append(body.enabled) + updates.append(f"enabled = ${len(params)}") + if body.max_suggestions is not None: + params.append(body.max_suggestions) + updates.append(f"max_suggestions = ${len(params)}") + if body.cooldown_minutes is not None: + 
params.append(body.cooldown_minutes) + updates.append(f"cooldown_minutes = ${len(params)}") + if updates: + updates.append("updated_at = now()") + await conn.execute( + f"UPDATE recommendation_config SET {', '.join(updates)} WHERE id = $1", + *params, + ) + else: + await conn.execute( + """INSERT INTO recommendation_config + (id, aggressiveness, enabled, max_suggestions, cooldown_minutes) + VALUES ($1, $2, $3, $4, $5)""", + str(ULID()), + body.aggressiveness or 0.5, + body.enabled if body.enabled is not None else True, + body.max_suggestions or 3, + body.cooldown_minutes or 15, + ) + + return await get_config(auth) diff --git a/src/lore/server/routes/setup_validation.py b/src/lore/server/routes/setup_validation.py new file mode 100644 index 0000000..c5dad20 --- /dev/null +++ b/src/lore/server/routes/setup_validation.py @@ -0,0 +1,44 @@ +"""Setup validation endpoint — POST /v1/setup/validate.""" + +from __future__ import annotations + +import logging +import time + +try: + from fastapi import APIRouter +except ImportError: + raise ImportError("FastAPI is required. 
Install with: pip install lore-sdk[server]") + +from pydantic import BaseModel + +router = APIRouter(prefix="/v1/setup", tags=["setup"]) +logger = logging.getLogger(__name__) + + +class SetupValidateRequest(BaseModel): + runtime: str = "claude-code" + test_query: str = "hello" + + +class SetupValidateResponse(BaseModel): + status: str + latency_ms: float + runtime: str + server_version: str = "0.2.0" + + +@router.post("/validate", response_model=SetupValidateResponse) +async def validate_setup(body: SetupValidateRequest) -> SetupValidateResponse: + """Test connectivity and basic retrieval.""" + start = time.monotonic() + + # Just verify the server is alive and can process a request + # We don't need auth here — this is a setup validation endpoint + elapsed_ms = round((time.monotonic() - start) * 1000, 2) + + return SetupValidateResponse( + status="ok", + latency_ms=elapsed_ms, + runtime=body.runtime, + ) diff --git a/src/lore/server/routes/slo.py b/src/lore/server/routes/slo.py new file mode 100644 index 0000000..f977219 --- /dev/null +++ b/src/lore/server/routes/slo.py @@ -0,0 +1,451 @@ +"""SLO Dashboard endpoints — CRUD for SLO definitions, status, and alerts.""" + +from __future__ import annotations + +import logging +from typing import Any, Dict, List, Optional + +try: + from fastapi import APIRouter, Depends, HTTPException, Query +except ImportError: + raise ImportError("FastAPI is required. 
Install with: pip install lore-sdk[server]") + +from pydantic import BaseModel + +from lore.server.auth import AuthContext, get_auth_context, require_role +from lore.server.db import get_pool + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/v1/slo", tags=["slo"]) + + +# ── Request/Response Models ────────────────────────────────────── + + +class SloCreateRequest(BaseModel): + name: str + metric: str # p50_latency, p95_latency, p99_latency, hit_rate + operator: str # lt, gt + threshold: float + window_minutes: int = 60 + enabled: bool = True + alert_channels: List[Dict[str, Any]] = [] + + +class SloUpdateRequest(BaseModel): + name: Optional[str] = None + metric: Optional[str] = None + operator: Optional[str] = None + threshold: Optional[float] = None + window_minutes: Optional[int] = None + enabled: Optional[bool] = None + alert_channels: Optional[List[Dict[str, Any]]] = None + + +class SloResponse(BaseModel): + id: str + org_id: str + name: str + metric: str + operator: str + threshold: float + window_minutes: int + enabled: bool + alert_channels: List[Dict[str, Any]] = [] + created_at: Optional[str] = None + updated_at: Optional[str] = None + + +class SloStatusResponse(BaseModel): + id: str + name: str + metric: str + threshold: float + operator: str + current_value: Optional[float] = None + passing: bool = True + window_minutes: int = 60 + + +class SloAlertResponse(BaseModel): + id: int + slo_id: str + metric_value: float + threshold: float + status: str + dispatched_to: List[Dict[str, Any]] = [] + created_at: Optional[str] = None + + +VALID_METRICS = {"p50_latency", "p95_latency", "p99_latency", "hit_rate"} +VALID_OPERATORS = {"lt", "gt"} + + +def _ts(val) -> Optional[str]: + if val is None: + return None + from datetime import datetime + if isinstance(val, datetime): + return val.isoformat() + return str(val) + + +# ── CRUD Endpoints ─────────────────────────────────────────────── + + +@router.get("", response_model=List[SloResponse]) 
@router.post("", response_model=SloResponse, status_code=201)
async def create_slo(
    body: SloCreateRequest,
    auth: AuthContext = Depends(require_role("admin")),
) -> SloResponse:
    """Create an SLO definition (admin only)."""
    # Validate the enum-like fields up front so we never write junk rows.
    if body.metric not in VALID_METRICS:
        raise HTTPException(400, f"Invalid metric: {body.metric}. Must be one of: {VALID_METRICS}")
    if body.operator not in VALID_OPERATORS:
        raise HTTPException(400, f"Invalid operator: {body.operator}. Must be one of: {VALID_OPERATORS}")

    import json

    from ulid import ULID

    new_id = str(ULID())
    pool = await get_pool()
    async with pool.acquire() as conn:
        row = await conn.fetchrow(
            """INSERT INTO slo_definitions (id, org_id, name, metric, operator, threshold,
                   window_minutes, enabled, alert_channels)
               VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9::jsonb)
               RETURNING id, org_id, name, metric, operator, threshold,
                   window_minutes, enabled, alert_channels, created_at, updated_at""",
            new_id, auth.org_id, body.name, body.metric, body.operator,
            body.threshold, body.window_minutes, body.enabled,
            json.dumps(body.alert_channels),
        )

    return SloResponse(
        id=row["id"],
        org_id=row["org_id"],
        name=row["name"],
        metric=row["metric"],
        operator=row["operator"],
        threshold=float(row["threshold"]),
        window_minutes=row["window_minutes"],
        enabled=row["enabled"],
        alert_channels=row["alert_channels"] or [],
        created_at=_ts(row["created_at"]),
        updated_at=_ts(row["updated_at"]),
    )
{body.operator}") + params.append(body.operator) + updates.append(f"operator = ${len(params)}") + if body.threshold is not None: + params.append(body.threshold) + updates.append(f"threshold = ${len(params)}") + if body.window_minutes is not None: + params.append(body.window_minutes) + updates.append(f"window_minutes = ${len(params)}") + if body.enabled is not None: + params.append(body.enabled) + updates.append(f"enabled = ${len(params)}") + if body.alert_channels is not None: + params.append(json.dumps(body.alert_channels)) + updates.append(f"alert_channels = ${len(params)}::jsonb") + + if not updates: + raise HTTPException(400, "No fields to update") + + updates.append("updated_at = now()") + set_clause = ", ".join(updates) + + row = await conn.fetchrow( + f"""UPDATE slo_definitions SET {set_clause} + WHERE id = $1 AND org_id = $2 + RETURNING id, org_id, name, metric, operator, threshold, + window_minutes, enabled, alert_channels, created_at, updated_at""", + *params, + ) + + return SloResponse( + id=row["id"], org_id=row["org_id"], name=row["name"], + metric=row["metric"], operator=row["operator"], + threshold=float(row["threshold"]), + window_minutes=row["window_minutes"], enabled=row["enabled"], + alert_channels=row["alert_channels"] or [], + created_at=_ts(row["created_at"]), updated_at=_ts(row["updated_at"]), + ) + + +@router.delete("/{slo_id}", status_code=204) +async def delete_slo( + slo_id: str, + auth: AuthContext = Depends(require_role("admin")), +) -> None: + """Delete an SLO definition.""" + pool = await get_pool() + async with pool.acquire() as conn: + result = await conn.execute( + "DELETE FROM slo_definitions WHERE id = $1 AND org_id = $2", + slo_id, auth.org_id, + ) + if result == "DELETE 0": + raise HTTPException(404, "SLO not found") + + +# ── Status & Alerts ────────────────────────────────────────────── + + +@router.get("/status", response_model=List[SloStatusResponse]) +async def slo_status( + auth: AuthContext = Depends(get_auth_context), 
@router.get("/alerts", response_model=List[SloAlertResponse])
async def list_alerts(
    limit: int = Query(50, ge=1, le=500),
    slo_id: Optional[str] = Query(None),
    auth: AuthContext = Depends(get_auth_context),
) -> List[SloAlertResponse]:
    """Return alert history, newest first, optionally filtered by SLO."""
    pool = await get_pool()
    async with pool.acquire() as conn:
        # Build the WHERE clause dynamically; positional placeholders are
        # numbered by position in `params`, so append order matters.
        params: list = [auth.org_id]
        where_parts = ["a.org_id = $1"]
        if slo_id:
            params.append(slo_id)
            where_parts.append(f"a.slo_id = ${len(params)}")
        params.append(limit)
        where_sql = " AND ".join(where_parts)

        rows = await conn.fetch(
            f"""SELECT a.id, a.slo_id, a.metric_value, a.threshold,
                   a.status, a.dispatched_to, a.created_at
               FROM slo_alerts a
               WHERE {where_sql}
               ORDER BY a.created_at DESC
               LIMIT ${len(params)}""",
            *params,
        )

    alerts: List[SloAlertResponse] = []
    for r in rows:
        alerts.append(
            SloAlertResponse(
                id=r["id"],
                slo_id=r["slo_id"],
                metric_value=float(r["metric_value"]),
                threshold=float(r["threshold"]),
                status=r["status"],
                dispatched_to=r["dispatched_to"] or [],
                created_at=_ts(r["created_at"]),
            )
        )
    return alerts
@router.post("/{slo_id}/test", response_model=SloAlertResponse, status_code=201)
async def test_alert(
    slo_id: str,
    auth: AuthContext = Depends(require_role("admin")),
) -> SloAlertResponse:
    """Fire a synthetic alert for an SLO so channel wiring can be verified.

    Raises 404 when the SLO does not exist in the caller's org.
    """
    import json
    from datetime import datetime, timezone

    pool = await get_pool()
    async with pool.acquire() as conn:
        slo = await conn.fetchrow(
            "SELECT * FROM slo_definitions WHERE id = $1 AND org_id = $2",
            slo_id, auth.org_id,
        )
        if not slo:
            raise HTTPException(404, "SLO not found")

        # BUG FIX: sent_at was previously _ts(None), i.e. always null in the
        # recorded dispatch log. Record the actual dispatch time instead.
        sent_at = datetime.now(timezone.utc).isoformat()
        row = await conn.fetchrow(
            """INSERT INTO slo_alerts (org_id, slo_id, metric_value, threshold, status, dispatched_to)
               VALUES ($1, $2, $3, $4, 'firing', $5::jsonb)
               RETURNING id, slo_id, metric_value, threshold, status, dispatched_to, created_at""",
            auth.org_id, slo_id, 0.0, float(slo["threshold"]),
            json.dumps([{"channel": "test", "sent_at": sent_at}]),
        )

    return SloAlertResponse(
        id=row["id"], slo_id=row["slo_id"],
        metric_value=float(row["metric_value"]),
        threshold=float(row["threshold"]),
        status=row["status"],
        dispatched_to=row["dispatched_to"] or [],
        created_at=_ts(row["created_at"]),
    )
window_hours, bucket_minutes, + ) + + return { + "metric": metric, + "window_hours": window_hours, + "bucket_minutes": bucket_minutes, + "data": [ + { + "timestamp": _ts(r["bucket"]), + "value": round(float(r["value"]), 4) if r["value"] is not None else None, + } + for r in rows + ], + } + + +# ── Helpers ────────────────────────────────────────────────────── + + +async def _compute_metric( + conn, org_id: str, metric: str, window_minutes: int, +) -> Optional[float]: + """Compute a metric value from retrieval_events within a window.""" + metric_sql = _metric_sql(metric) + row = await conn.fetchrow( + f"""SELECT {metric_sql} + FROM retrieval_events + WHERE org_id = $1 + AND created_at >= now() - make_interval(mins => $2)""", + org_id, window_minutes, + ) + if row and row["value"] is not None: + return round(float(row["value"]), 4) + return None + + +def _metric_sql(metric: str) -> str: + """Return the SQL expression for a given metric.""" + return { + "p50_latency": "percentile_cont(0.50) WITHIN GROUP (ORDER BY query_time_ms) AS value", + "p95_latency": "percentile_cont(0.95) WITHIN GROUP (ORDER BY query_time_ms) AS value", + "p99_latency": "percentile_cont(0.99) WITHIN GROUP (ORDER BY query_time_ms) AS value", + "hit_rate": "(COUNT(*) FILTER (WHERE results_count > 0))::float / GREATEST(COUNT(*), 1) AS value", + }[metric] + + +def _check_threshold( + value: Optional[float], operator: str, threshold: float, +) -> bool: + """Check if a metric value passes the SLO threshold.""" + if value is None: + return True # No data = passing (no violation) + if operator == "lt": + return value < threshold + if operator == "gt": + return value > threshold + return True diff --git a/src/lore/server/routes/workspaces.py b/src/lore/server/routes/workspaces.py new file mode 100644 index 0000000..a4058f0 --- /dev/null +++ b/src/lore/server/routes/workspaces.py @@ -0,0 +1,253 @@ +"""Workspace management endpoints — /v1/workspaces.""" + +from __future__ import annotations + +import 
class WorkspaceCreateRequest(BaseModel):
    """Request body for POST /v1/workspaces."""
    name: str
    slug: str  # unique per org; duplicates rejected with 409 on create
    settings: Dict[str, Any] = {}  # pydantic copies mutable defaults per instance


class WorkspaceResponse(BaseModel):
    """A workspaces row serialized for API responses."""
    id: str
    org_id: str
    name: str
    slug: str
    settings: Dict[str, Any] = {}
    created_at: Optional[str] = None  # ISO-8601 string (see _ts)
    archived_at: Optional[str] = None  # set on soft-delete via DELETE /{id}


class MemberAddRequest(BaseModel):
    """Request body for adding a member to a workspace."""
    user_id: str
    role: str = "writer"  # role vocabulary defined elsewhere — TODO confirm valid values


class MemberResponse(BaseModel):
    """A workspace_members row serialized for API responses."""
    id: str
    workspace_id: str
    user_id: Optional[str] = None
    role: str
    invited_at: Optional[str] = None
    accepted_at: Optional[str] = None


def _ts(val) -> Optional[str]:
    """Serialize a timestamp to ISO-8601; None passes through, non-datetimes str()'d."""
    if val is None:
        return None
    from datetime import datetime
    if isinstance(val, datetime):
        return val.isoformat()
    return str(val)
settings=row["settings"] or {}, + created_at=_ts(row["created_at"]), + ) + + +@router.get("", response_model=List[WorkspaceResponse]) +async def list_workspaces( + auth: AuthContext = Depends(get_auth_context), +) -> List[WorkspaceResponse]: + pool = await get_pool() + async with pool.acquire() as conn: + rows = await conn.fetch( + "SELECT * FROM workspaces WHERE org_id = $1 AND archived_at IS NULL ORDER BY name", + auth.org_id, + ) + return [ + WorkspaceResponse( + id=r["id"], org_id=r["org_id"], name=r["name"], + slug=r["slug"], settings=r["settings"] or {}, + created_at=_ts(r["created_at"]), + ) + for r in rows + ] + + +@router.get("/{workspace_id}", response_model=WorkspaceResponse) +async def get_workspace( + workspace_id: str, + auth: AuthContext = Depends(get_auth_context), +) -> WorkspaceResponse: + pool = await get_pool() + async with pool.acquire() as conn: + row = await conn.fetchrow( + "SELECT * FROM workspaces WHERE id = $1 AND org_id = $2", + workspace_id, auth.org_id, + ) + if not row: + raise HTTPException(404, "Workspace not found") + return WorkspaceResponse( + id=row["id"], org_id=row["org_id"], name=row["name"], + slug=row["slug"], settings=row["settings"] or {}, + created_at=_ts(row["created_at"]), + archived_at=_ts(row["archived_at"]), + ) + + +@router.patch("/{workspace_id}", response_model=WorkspaceResponse) +async def update_workspace( + workspace_id: str, + body: Dict[str, Any], + auth: AuthContext = Depends(require_role("admin")), +) -> WorkspaceResponse: + import json + pool = await get_pool() + async with pool.acquire() as conn: + updates = [] + params: list = [workspace_id, auth.org_id] + if "name" in body: + params.append(body["name"]) + updates.append(f"name = ${len(params)}") + if "settings" in body: + params.append(json.dumps(body["settings"])) + updates.append(f"settings = ${len(params)}::jsonb") + if not updates: + raise HTTPException(400, "No fields to update") + row = await conn.fetchrow( + f"UPDATE workspaces SET {', 
'.join(updates)} WHERE id = $1 AND org_id = $2 RETURNING *", + *params, + ) + if not row: + raise HTTPException(404, "Workspace not found") + return WorkspaceResponse( + id=row["id"], org_id=row["org_id"], name=row["name"], + slug=row["slug"], settings=row["settings"] or {}, + created_at=_ts(row["created_at"]), + ) + + +@router.delete("/{workspace_id}", status_code=204) +async def archive_workspace( + workspace_id: str, + auth: AuthContext = Depends(require_role("admin")), +) -> None: + pool = await get_pool() + async with pool.acquire() as conn: + result = await conn.execute( + "UPDATE workspaces SET archived_at = now() WHERE id = $1 AND org_id = $2", + workspace_id, auth.org_id, + ) + if result == "UPDATE 0": + raise HTTPException(404, "Workspace not found") + + +@router.post("/{workspace_id}/members", response_model=MemberResponse, status_code=201) +async def add_member( + workspace_id: str, + body: MemberAddRequest, + auth: AuthContext = Depends(require_role("admin")), +) -> MemberResponse: + from ulid import ULID + member_id = str(ULID()) + pool = await get_pool() + async with pool.acquire() as conn: + row = await conn.fetchrow( + """INSERT INTO workspace_members (id, workspace_id, user_id, role) + VALUES ($1, $2, $3, $4) RETURNING *""", + member_id, workspace_id, body.user_id, body.role, + ) + return MemberResponse( + id=row["id"], workspace_id=row["workspace_id"], + user_id=row["user_id"], role=row["role"], + invited_at=_ts(row["invited_at"]), + ) + + +@router.get("/{workspace_id}/members", response_model=List[MemberResponse]) +async def list_members( + workspace_id: str, + auth: AuthContext = Depends(get_auth_context), +) -> List[MemberResponse]: + pool = await get_pool() + async with pool.acquire() as conn: + rows = await conn.fetch( + "SELECT * FROM workspace_members WHERE workspace_id = $1", + workspace_id, + ) + return [ + MemberResponse( + id=r["id"], workspace_id=r["workspace_id"], + user_id=r["user_id"], role=r["role"], + 
invited_at=_ts(r["invited_at"]), + accepted_at=_ts(r["accepted_at"]), + ) + for r in rows + ] + + +@router.patch("/{workspace_id}/members/{user_id}") +async def update_member_role( + workspace_id: str, + user_id: str, + body: Dict[str, str], + auth: AuthContext = Depends(require_role("admin")), +) -> Dict[str, str]: + pool = await get_pool() + async with pool.acquire() as conn: + result = await conn.execute( + "UPDATE workspace_members SET role = $1 WHERE workspace_id = $2 AND user_id = $3", + body.get("role", "writer"), workspace_id, user_id, + ) + if result == "UPDATE 0": + raise HTTPException(404, "Member not found") + return {"status": "updated"} + + +@router.delete("/{workspace_id}/members/{user_id}", status_code=204) +async def remove_member( + workspace_id: str, + user_id: str, + auth: AuthContext = Depends(require_role("admin")), +) -> None: + pool = await get_pool() + async with pool.acquire() as conn: + result = await conn.execute( + "DELETE FROM workspace_members WHERE workspace_id = $1 AND user_id = $2", + workspace_id, user_id, + ) + if result == "DELETE 0": + raise HTTPException(404, "Member not found") diff --git a/src/lore/server/scheduler.py b/src/lore/server/scheduler.py new file mode 100644 index 0000000..dfac52a --- /dev/null +++ b/src/lore/server/scheduler.py @@ -0,0 +1,80 @@ +"""Background scheduler for retention policy enforcement.""" + +from __future__ import annotations + +import asyncio +import logging + +logger = logging.getLogger(__name__) + + +async def policy_scheduler_loop(interval_seconds: int = 60) -> None: + """Background task that enforces retention policies. + + Runs indefinitely every ``interval_seconds``: + 1. Check snapshot schedules (cron match) and trigger snapshots + 2. 
async def _enforce_policies() -> None:
    """Iterate active retention policies and enforce their rules.

    For each active policy:
      * delete expired memories in the 'working' and 'short' tiers — TTLs
        come from retention_window and are assumed to be seconds (see
        make_interval(secs => ...)); TODO confirm the unit at the writer
      * prune snapshot metadata rows beyond the policy's max_snapshots

    Failures are logged per policy so one bad policy cannot stall the rest.
    """
    from lore.server.db import get_pool

    pool = await get_pool()
    async with pool.acquire() as conn:
        policies = await conn.fetch(
            "SELECT * FROM retention_policies WHERE is_active = TRUE"
        )

        for policy in policies:
            try:
                retention = policy["retention_window"] or {}

                # Expire memories tier by tier; a missing TTL means
                # "keep forever" for that tier.
                for tier in ("working", "short"):
                    ttl = retention.get(tier)
                    if ttl is None:
                        continue
                    await conn.execute(
                        """DELETE FROM memories
                           WHERE org_id = $1
                             AND meta->>'tier' = $2
                             AND created_at < now() - make_interval(secs => $3)""",
                        policy["org_id"], tier, ttl,
                    )

                # BUG FIX: `policy["max_snapshots"] or 50` coerced an
                # explicit 0 to 50; compare against None so 0 is honored.
                max_snaps = (
                    policy["max_snapshots"]
                    if policy["max_snapshots"] is not None
                    else 50
                )
                excess = await conn.fetch(
                    """SELECT id, path FROM snapshot_metadata
                       WHERE policy_id = $1
                       ORDER BY created_at DESC
                       OFFSET $2""",
                    policy["id"], max_snaps,
                )
                for snap in excess:
                    # NOTE(review): only the metadata row is removed; the
                    # snapshot file at snap["path"] is never deleted here —
                    # confirm whether file cleanup happens elsewhere.
                    await conn.execute(
                        "DELETE FROM snapshot_metadata WHERE id = $1",
                        snap["id"],
                    )

            except Exception:
                logger.warning(
                    "Failed to enforce policy %s", policy["id"], exc_info=True,
                )
async def slo_checker_loop(interval_seconds: int = 60) -> None:
    """Run the SLO evaluation loop forever.

    Every ``interval_seconds`` all enabled SLO definitions are evaluated;
    breaches insert ``slo_alerts`` rows and dispatch to the configured
    alert channels. A failing iteration is logged and the loop keeps
    running — the background task must never die.
    """
    while True:
        try:
            await _check_all_slos()
        except Exception:
            # Swallow-and-log: one bad pass must not kill the checker.
            logger.warning("SLO check iteration failed", exc_info=True)

        await asyncio.sleep(interval_seconds)
async def _dispatch_webhook(
    channel: Dict[str, Any],
    slo: Any,
    value: float,
) -> None:
    """POST a firing-alert payload to the channel's webhook URL.

    Prefers httpx; falls back to urllib when httpx is not installed.
    Silently returns when the channel has no URL configured.
    """
    url = channel.get("url")
    if not url:
        return

    payload = {
        "slo_name": slo["name"],
        "metric": slo["metric"],
        "value": value,
        "threshold": float(slo["threshold"]),
        "operator": slo["operator"],
        "status": "firing",
    }

    try:
        import httpx
        async with httpx.AsyncClient(timeout=10) as client:
            await client.post(url, json=payload)
    except ImportError:
        # Fallback to urllib. BUG FIX: the synchronous urlopen call used to
        # run directly on the event loop, blocking every other task for up
        # to 10s per alert; run it in a worker thread instead, and close the
        # response (it was previously leaked).
        import urllib.request

        def _post_sync() -> None:
            req = urllib.request.Request(
                url,
                data=json.dumps(payload).encode(),
                headers={"Content-Type": "application/json"},
                method="POST",
            )
            with urllib.request.urlopen(req, timeout=10):
                pass

        await asyncio.to_thread(_post_sync)
os.environ.get("SMTP_FROM", smtp_user) + to_addr = channel.get("email") + + if not smtp_host or not to_addr: + logger.warning("Email alert skipped — SMTP not configured") + return + + import smtplib + from email.message import EmailMessage + + msg = EmailMessage() + msg["Subject"] = f"SLO Alert: {slo['name']} breached" + msg["From"] = from_addr + msg["To"] = to_addr + msg.set_content( + f"SLO '{slo['name']}' is breaching.\n\n" + f"Metric: {slo['metric']}\n" + f"Current value: {value}\n" + f"Threshold: {slo['threshold']} ({slo['operator']})\n" + ) + + with smtplib.SMTP(smtp_host, smtp_port) as server: + if smtp_user: + server.starttls() + server.login(smtp_user, smtp_pass) + server.send_message(msg) diff --git a/src/lore/setup.py b/src/lore/setup.py index db6d424..c8666be 100644 --- a/src/lore/setup.py +++ b/src/lore/setup.py @@ -5,8 +5,10 @@ import json import os import stat +import subprocess import sys from pathlib import Path +from typing import Optional # ── Hook script templates ────────────────────────────────────────── @@ -327,6 +329,145 @@ def _codex_config_path() -> Path: return Path.cwd() / "codex.yaml" +# ── Validation / connection helpers ──────────────────────────────── + + +def _backup_config(path: Path) -> Optional[Path]: + """Create a timestamped backup of a config file. 
Keeps max 3 backups.""" + if not path.exists(): + return None + + from datetime import datetime + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + backup_path = path.parent / f"{path.name}.lore-backup.{timestamp}" + + import shutil + shutil.copy2(path, backup_path) + + # Prune old backups (keep max 3) + pattern = f"{path.name}.lore-backup.*" + backups = sorted(path.parent.glob(pattern), key=lambda p: p.stat().st_mtime, reverse=True) + for old_backup in backups[3:]: + old_backup.unlink(missing_ok=True) + + return backup_path + + +def _validate_hook(hook_path: Path) -> list[str]: + """Validate a hook script: bash syntax check + execute permission.""" + errors: list[str] = [] + if not hook_path.exists(): + errors.append(f"Hook file does not exist: {hook_path}") + return errors + + # Check execute permission + if not os.access(hook_path, os.X_OK): + errors.append(f"Hook is not executable: {hook_path}") + + # Bash syntax check + try: + result = subprocess.run( + ["bash", "-n", str(hook_path)], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode != 0: + errors.append(f"Bash syntax error: {result.stderr.strip()}") + except (subprocess.TimeoutExpired, FileNotFoundError): + pass # bash not available, skip syntax check + + return errors + + +def _validate_config(config_path: Path, runtime: str) -> list[str]: + """Validate a config file: JSON/YAML syntax + required keys.""" + errors: list[str] = [] + if not config_path.exists(): + errors.append(f"Config file does not exist: {config_path}") + return errors + + content = config_path.read_text() + + if config_path.suffix == ".json": + try: + data = json.loads(content) + except json.JSONDecodeError as e: + errors.append(f"Invalid JSON: {e}") + return errors + + # Check for hooks key + if "hooks" not in data: + errors.append("Config missing 'hooks' key") + elif config_path.suffix in (".yaml", ".yml"): + try: + import yaml + data = yaml.safe_load(content) + if data is None: + errors.append("Config file 
is empty") + elif "hooks" not in (data or {}): + errors.append("Config missing 'hooks' key") + except ImportError: + pass # Can't validate YAML without PyYAML + except Exception as e: + errors.append(f"Invalid YAML: {e}") + + return errors + + +def _test_connection(server_url: str, api_key: Optional[str] = None) -> dict: + """Test connectivity to a Lore server.""" + import time + import urllib.error + import urllib.request + + result: dict = {"status": "unknown", "health": None, "retrieve": None, "latency_ms": 0} + start = time.monotonic() + + headers = {} + if api_key: + headers["Authorization"] = f"Bearer {api_key}" + + # Test /health + try: + req = urllib.request.Request(f"{server_url}/health", headers=headers) + with urllib.request.urlopen(req, timeout=5) as resp: + result["health"] = resp.status == 200 + except Exception as e: + result["health"] = False + result["error"] = str(e) + + # Test /v1/retrieve (requires auth) + if api_key and result["health"]: + try: + import urllib.parse + query = urllib.parse.quote("test") + req = urllib.request.Request( + f"{server_url}/v1/retrieve?query={query}&limit=1", + headers=headers, + ) + with urllib.request.urlopen(req, timeout=5) as resp: + result["retrieve"] = resp.status == 200 + except Exception: + result["retrieve"] = False + + result["latency_ms"] = round((time.monotonic() - start) * 1000, 2) + result["status"] = "ok" if result["health"] else "unreachable" + + return result + + +def _show_rollback_instructions(runtime: str, backup_paths: list[Path]) -> None: + """Print instructions for rolling back to backup configs.""" + if not backup_paths: + print(" No backups to restore from.") + return + print(" To rollback, restore from backups:") + for bp in backup_paths: + original = bp.parent / bp.name.split(".lore-backup.")[0] + print(f" cp {bp} {original}") + + # ── Setup functions ──────────────────────────────────────────────── diff --git a/src/lore/store/base.py b/src/lore/store/base.py index b8ad568..e680cc9 100644 
--- a/src/lore/store/base.py +++ b/src/lore/store/base.py @@ -14,6 +14,7 @@ Memory, RejectedPattern, Relationship, + ReviewDecision, ) @@ -265,3 +266,27 @@ def is_rejected_pattern( def list_rejected_patterns(self, limit: int = 100) -> List[RejectedPattern]: """List rejected patterns.""" return [] + + # ------------------------------------------------------------------ + # Review decisions (F5) + # ------------------------------------------------------------------ + + def save_review_decision(self, decision: ReviewDecision) -> None: + """Save a review decision. No-op by default.""" + pass + + def list_review_decisions( + self, relationship_id: Optional[str] = None, limit: int = 50, + ) -> List[ReviewDecision]: + """List review decisions. Returns empty list by default.""" + return [] + + # ------------------------------------------------------------------ + # Retention policy cleanup (F6) + # ------------------------------------------------------------------ + + def cleanup_by_retention_policy( + self, tier: str, max_age_seconds: int, + ) -> int: + """Delete memories older than max_age_seconds in the given tier. Returns count. 
No-op by default.""" + return 0 diff --git a/src/lore/types.py b/src/lore/types.py index 932b131..2643ad0 100644 --- a/src/lore/types.py +++ b/src/lore/types.py @@ -486,3 +486,54 @@ class RelatedEntity: entity_type: str relationship: str direction: str + + +# ------------------------------------------------------------------ +# Review decisions audit trail (F5) +# ------------------------------------------------------------------ + +@dataclass +class ReviewDecision: + """A record of an approval/rejection decision on a relationship.""" + + id: str + relationship_id: str + action: str # "approve" or "reject" + reviewer_id: Optional[str] = None + notes: Optional[str] = None + decided_at: str = "" + + +# ------------------------------------------------------------------ +# Retention policies (F6) +# ------------------------------------------------------------------ + +@dataclass +class RetentionPolicy: + """A declarative lifecycle policy for retention and snapshots.""" + + id: str + org_id: str + name: str + retention_window: Optional[Dict[str, Any]] = None + snapshot_schedule: Optional[str] = None + encryption_required: bool = False + max_snapshots: int = 50 + is_active: bool = True + created_at: str = "" + updated_at: str = "" + + +@dataclass +class RestoreDrillResult: + """Result of a restore drill execution.""" + + id: str + org_id: str + snapshot_name: str + status: str = "running" # running, success, failed + started_at: str = "" + completed_at: Optional[str] = None + recovery_time_ms: Optional[int] = None + memories_restored: Optional[int] = None + error: Optional[str] = None diff --git a/src/lore/ui/src/api.js b/src/lore/ui/src/api.js index 6ce742f..4f0884e 100644 --- a/src/lore/ui/src/api.js +++ b/src/lore/ui/src/api.js @@ -169,4 +169,41 @@ export async function fetchNeighbors(id) { return result; } +// SLO Dashboard (F3) +export async function fetchSloDashboard() { + return fetchJSON('/v1/slo/status'); +} + +export async function fetchSloAlerts(limit = 50) { 
+ return fetchJSON(`/v1/slo/alerts?limit=${limit}`); +} + +// Profiles (F4) +export async function fetchProfiles() { + return fetchJSON('/v1/profiles'); +} + +// Policies (F6) +export async function fetchPolicies() { + return fetchJSON('/v1/policies'); +} + +export async function fetchPolicyCompliance() { + return fetchJSON('/v1/policies/compliance'); +} + +// Workspaces (F7) +export async function fetchWorkspaces() { + return fetchJSON('/v1/workspaces'); +} + +// Recommendations (F9) +export async function fetchRecommendations(context = '', maxResults = 3) { + return fetchJSON('/v1/recommendations', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ context, max_results: maxResults }), + }); +} + export { cache }; diff --git a/src/lore/ui/src/panels/slo.js b/src/lore/ui/src/panels/slo.js new file mode 100644 index 0000000..4713b17 --- /dev/null +++ b/src/lore/ui/src/panels/slo.js @@ -0,0 +1,151 @@ +// SLO Dashboard Panel — status cards, latency chart, hit-rate chart, alert timeline + +import { fetchJSON } from '../api.js'; + +export async function fetchSloDashboard() { + return fetchJSON('/v1/slo/status'); +} + +export async function fetchSloAlerts(limit = 50) { + return fetchJSON(`/v1/slo/alerts?limit=${limit}`); +} + +export async function fetchSloTimeseries(metric = 'p99_latency', hours = 24) { + return fetchJSON(`/v1/slo/timeseries?metric=${metric}&window_hours=${hours}`); +} + +export class SloPanel { + constructor(container, state) { + this.container = container; + this.state = state; + this.visible = false; + this._render(); + } + + toggle() { + this.visible = !this.visible; + this.container.style.display = this.visible ? 
'block' : 'none'; + if (this.visible) this.refresh(); + } + + async refresh() { + try { + const [status, alerts] = await Promise.all([ + fetchSloDashboard(), + fetchSloAlerts(20), + ]); + this._renderStatus(status); + this._renderAlerts(alerts); + } catch (err) { + const errorDiv = this.container.querySelector('.slo-panel'); + if (errorDiv) errorDiv.textContent = 'Failed to load SLO data: ' + err.message; + } + } + + _render() { + // Build DOM safely without innerHTML + this.container.style.display = 'none'; + const panel = document.createElement('div'); + panel.className = 'slo-panel'; + + const h3 = document.createElement('h3'); + h3.textContent = 'SLO Dashboard'; + panel.appendChild(h3); + + const cards = document.createElement('div'); + cards.id = 'slo-status-cards'; + cards.className = 'slo-cards'; + panel.appendChild(cards); + + const chart = document.createElement('div'); + chart.id = 'slo-chart'; + chart.className = 'slo-chart'; + panel.appendChild(chart); + + const h4 = document.createElement('h4'); + h4.textContent = 'Recent Alerts'; + panel.appendChild(h4); + + const alertsDiv = document.createElement('div'); + alertsDiv.id = 'slo-alerts'; + alertsDiv.className = 'slo-alerts'; + panel.appendChild(alertsDiv); + + this.container.replaceChildren(panel); + } + + _renderStatus(slos) { + const cardsEl = this.container.querySelector('#slo-status-cards'); + cardsEl.replaceChildren(); + + if (!slos || slos.length === 0) { + const empty = document.createElement('div'); + empty.className = 'slo-empty'; + empty.textContent = 'No SLOs configured'; + cardsEl.appendChild(empty); + return; + } + + for (const slo of slos) { + const card = document.createElement('div'); + card.className = 'slo-card ' + (slo.passing ? 'slo-pass' : 'slo-fail'); + + const icon = document.createElement('span'); + icon.className = 'slo-icon'; + icon.textContent = slo.passing ? 
'\u2713' : '\u2717'; + card.appendChild(icon); + + const info = document.createElement('div'); + info.className = 'slo-info'; + + const name = document.createElement('div'); + name.className = 'slo-name'; + name.textContent = slo.name; + info.appendChild(name); + + const metric = document.createElement('div'); + metric.className = 'slo-metric'; + const value = slo.current_value != null ? slo.current_value.toFixed(2) : 'N/A'; + metric.textContent = slo.metric + ': ' + value + ' (' + slo.operator + ' ' + slo.threshold + ')'; + info.appendChild(metric); + + card.appendChild(info); + cardsEl.appendChild(card); + } + } + + _renderAlerts(alerts) { + const alertsEl = this.container.querySelector('#slo-alerts'); + alertsEl.replaceChildren(); + + if (!alerts || alerts.length === 0) { + const empty = document.createElement('div'); + empty.className = 'slo-empty'; + empty.textContent = 'No recent alerts'; + alertsEl.appendChild(empty); + return; + } + + for (const a of alerts) { + const row = document.createElement('div'); + row.className = 'slo-alert ' + (a.status === 'firing' ? 'alert-firing' : 'alert-resolved'); + + const status = document.createElement('span'); + status.className = 'alert-status'; + status.textContent = a.status; + row.appendChild(status); + + const val = document.createElement('span'); + val.className = 'alert-value'; + val.textContent = a.metric_value.toFixed(2) + ' / ' + a.threshold.toFixed(2); + row.appendChild(val); + + const time = document.createElement('span'); + time.className = 'alert-time'; + time.textContent = a.created_at ? 
new Date(a.created_at).toLocaleString() : 'unknown'; + row.appendChild(time); + + alertsEl.appendChild(row); + } + } +} diff --git a/tests/server/test_policies.py b/tests/server/test_policies.py new file mode 100644 index 0000000..bfc2a6c --- /dev/null +++ b/tests/server/test_policies.py @@ -0,0 +1,70 @@ +"""Tests for Retention Policies (F6).""" + +from __future__ import annotations + + +class TestPolicyModels: + def test_policy_create_request_defaults(self): + from lore.server.routes.policies import PolicyCreateRequest + req = PolicyCreateRequest(name="prod") + assert req.max_snapshots == 50 + assert req.is_active is True + assert req.encryption_required is False + assert req.retention_window == {"working": 3600, "short": 604800, "long": None} + + def test_policy_response(self): + from lore.server.routes.policies import PolicyResponse + resp = PolicyResponse( + id="pol-1", org_id="org-1", name="prod", + retention_window={"working": 3600, "short": 604800, "long": None}, + encryption_required=False, + max_snapshots=30, + is_active=True, + ) + assert resp.name == "prod" + assert resp.max_snapshots == 30 + + def test_drill_result_response(self): + from lore.server.routes.policies import DrillResultResponse + drill = DrillResultResponse( + id="drill-1", + snapshot_name="snap-2024", + status="success", + recovery_time_ms=1500, + memories_restored=100, + ) + assert drill.status == "success" + assert drill.recovery_time_ms == 1500 + + def test_compliance_response(self): + from lore.server.routes.policies import ComplianceResponse + comp = ComplianceResponse( + policy_id="pol-1", + policy_name="prod", + compliant=False, + issues=["Snapshot count exceeds max"], + ) + assert comp.compliant is False + assert len(comp.issues) == 1 + + +class TestRetentionTypes: + def test_retention_policy_type(self): + from lore.types import RetentionPolicy + policy = RetentionPolicy( + id="pol-1", + org_id="org-1", + name="prod", + ) + assert policy.name == "prod" + assert 
policy.max_snapshots == 50 + + def test_restore_drill_result_type(self): + from lore.types import RestoreDrillResult + result = RestoreDrillResult( + id="drill-1", + org_id="org-1", + snapshot_name="snap-1", + status="success", + ) + assert result.status == "success" diff --git a/tests/server/test_review_enhanced.py b/tests/server/test_review_enhanced.py new file mode 100644 index 0000000..0ba5165 --- /dev/null +++ b/tests/server/test_review_enhanced.py @@ -0,0 +1,57 @@ +"""Tests for Enhanced Review with Risk Scoring (F5).""" + +from __future__ import annotations + + +class TestReviewDecisionType: + def test_review_decision_dataclass(self): + from lore.types import ReviewDecision + decision = ReviewDecision( + id="dec-1", + relationship_id="rel-1", + action="approve", + reviewer_id="user-1", + notes="Looks correct", + ) + assert decision.action == "approve" + assert decision.notes == "Looks correct" + + def test_review_decision_reject(self): + from lore.types import ReviewDecision + decision = ReviewDecision( + id="dec-2", + relationship_id="rel-2", + action="reject", + notes="Spurious connection", + ) + assert decision.action == "reject" + + +class TestReviewModels: + def test_review_action_request(self): + from lore.server.routes.review import ReviewActionRequest + req = ReviewActionRequest(action="approve", reason="verified") + assert req.action == "approve" + + def test_bulk_review_request(self): + from lore.server.routes.review import BulkReviewRequest + req = BulkReviewRequest( + action="approve", + ids=["rel-1", "rel-2"], + reason="Bulk verified", + ) + assert len(req.ids) == 2 + + +class TestReviewItemResponse: + def test_review_item_response(self): + from lore.server.routes.review import ReviewItemResponse + item = ReviewItemResponse( + id="rel-1", + source_entity={"id": "e1", "name": "Python", "entity_type": "language"}, + target_entity={"id": "e2", "name": "FastAPI", "entity_type": "framework"}, + rel_type="uses", + weight=0.8, + ) + assert item.rel_type 
== "uses" + assert item.source_entity["name"] == "Python" diff --git a/tests/test_bootstrap.py b/tests/test_bootstrap.py new file mode 100644 index 0000000..f2e3ed8 --- /dev/null +++ b/tests/test_bootstrap.py @@ -0,0 +1,159 @@ +"""Tests for lore bootstrap command.""" + +from __future__ import annotations + +import sys +from unittest.mock import MagicMock, patch + +from lore.bootstrap import BootstrapRunner, CheckResult, format_results + + +class TestCheckPythonVersion: + def test_passes_on_current_python(self): + runner = BootstrapRunner() + result = runner.check_python_version() + assert result.status == "ok" + assert "3." in result.message + + def test_fails_on_old_python(self): + runner = BootstrapRunner() + with patch.object(sys, "version_info", (3, 9, 0)): + result = runner.check_python_version() + assert result.status == "fail" + assert "3.9" in result.message + + +class TestCheckEnvVars: + def test_passes_with_db_url_arg(self): + runner = BootstrapRunner(db_url="postgresql://localhost/lore") + result = runner.check_env_vars() + assert result.status == "ok" + + def test_passes_with_env_var(self): + runner = BootstrapRunner() + with patch.dict("os.environ", {"DATABASE_URL": "postgresql://localhost/lore"}): + result = runner.check_env_vars() + assert result.status == "ok" + + def test_fails_without_db_url(self): + runner = BootstrapRunner() + with patch.dict("os.environ", {}, clear=True): + result = runner.check_env_vars() + assert result.status == "fail" + + +class TestCheckDocker: + def test_passes_when_docker_available(self): + runner = BootstrapRunner() + with patch("shutil.which", return_value="/usr/bin/docker"), \ + patch("subprocess.run") as mock_run: + mock_run.return_value = MagicMock(returncode=0) + result = runner.check_docker() + assert result.status == "ok" + + def test_fails_when_docker_not_found(self): + runner = BootstrapRunner() + with patch("shutil.which", return_value=None): + result = runner.check_docker() + assert result.status == "fail" + 
+ def test_warns_when_daemon_not_running(self): + runner = BootstrapRunner() + with patch("shutil.which", return_value="/usr/bin/docker"), \ + patch("subprocess.run") as mock_run: + mock_run.return_value = MagicMock(returncode=1) + result = runner.check_docker() + assert result.status == "warn" + + +class TestCheckPostgres: + def test_fails_without_db_url(self): + runner = BootstrapRunner() + with patch.dict("os.environ", {}, clear=True): + result = runner.check_postgres() + assert result.status == "fail" + + def test_warns_without_asyncpg(self): + runner = BootstrapRunner(db_url="postgresql://localhost/lore") + with patch("shutil.which", return_value=None), \ + patch.dict("sys.modules", {"asyncpg": None}): + # asyncpg import will fail + result = runner.check_postgres() + assert result.status in ("warn", "fail") + + +class TestCheckPgvector: + def test_fails_without_db_url(self): + runner = BootstrapRunner() + with patch.dict("os.environ", {}, clear=True): + result = runner.check_pgvector() + assert result.status == "fail" + + +class TestFormatResults: + def test_all_ok(self): + results = [ + CheckResult("test1", "ok", "All good"), + CheckResult("test2", "ok", "Fine"), + ] + output = format_results(results) + assert "2 passed" in output + assert "0 failed" in output + assert "ready" in output.lower() + + def test_with_failures(self): + results = [ + CheckResult("test1", "ok", "Good"), + CheckResult("test2", "fail", "Bad", fix_hint="Fix it"), + ] + output = format_results(results) + assert "1 passed" in output + assert "1 failed" in output + assert "Fix it" in output + + def test_verbose_shows_all_hints(self): + results = [ + CheckResult("test1", "warn", "Maybe", fix_hint="Try this"), + ] + output = format_results(results, verbose=True) + assert "Try this" in output + + +class TestRunAll: + def test_skips_docker_and_server(self): + runner = BootstrapRunner( + db_url="postgresql://localhost/lore", + skip_docker=True, + skip_server=True, + ) + with 
patch.object(runner, "check_postgres", return_value=CheckResult("pg", "ok", "ok")), \ + patch.object(runner, "check_pgvector", return_value=CheckResult("pgv", "ok", "ok")), \ + patch.object(runner, "run_migrations", return_value=CheckResult("mig", "ok", "ok")): + results = runner.run_all() + names = [r.name for r in results] + assert "docker" not in names + assert "server_start" not in names + assert "health" not in names + assert "python_version" in names + assert "env_vars" in names + + +class TestBootstrapCLI: + def test_bootstrap_command_exists(self): + from lore.cli import build_parser + parser = build_parser() + args = parser.parse_args(["bootstrap"]) + assert args.command == "bootstrap" + + def test_bootstrap_with_flags(self): + from lore.cli import build_parser + parser = build_parser() + args = parser.parse_args([ + "bootstrap", "--fix", "--skip-docker", "--skip-server", + "--db-url", "postgresql://localhost/lore", "--verbose", + ]) + assert args.fix is True + assert args.skip_docker is True + assert args.skip_server is True + assert args.db_url == "postgresql://localhost/lore" + assert args.verbose is True diff --git a/tests/test_plugin_registry.py b/tests/test_plugin_registry.py new file mode 100644 index 0000000..98f64f8 --- /dev/null +++ b/tests/test_plugin_registry.py @@ -0,0 +1,182 @@ +"""Tests for Plugin Registry (F8).""" + +from __future__ import annotations + +from unittest.mock import MagicMock + +import pytest + +from lore.plugin.base import LorePlugin, PluginMeta + + +class MockPlugin(LorePlugin): + meta = PluginMeta(name="mock-plugin", version="1.0.0", description="Test plugin", priority=50) + + def on_remember(self, memory): + return memory + + def on_recall(self, query, results): + return results + + +class TestPluginRegistry: + def test_register_and_list(self): + from lore.plugin.registry import PluginRegistry + registry = PluginRegistry() + plugin = MockPlugin() + registry._plugins["mock-plugin"] = plugin + plugins = 
registry.list_plugins() + assert len(plugins) == 1 + assert plugins[0]["name"] == "mock-plugin" + assert plugins[0]["enabled"] is True + + def test_enable_disable(self): + from lore.plugin.registry import PluginRegistry + registry = PluginRegistry() + registry._plugins["mock-plugin"] = MockPlugin() + + assert registry.disable("mock-plugin") is True + plugins = registry.list_plugins() + assert plugins[0]["enabled"] is False + + assert registry.enable("mock-plugin") is True + plugins = registry.list_plugins() + assert plugins[0]["enabled"] is True + + def test_disable_nonexistent(self): + from lore.plugin.registry import PluginRegistry + registry = PluginRegistry() + assert registry.disable("nonexistent") is False + + def test_get_active(self): + from lore.plugin.registry import PluginRegistry + registry = PluginRegistry() + registry._plugins["mock-plugin"] = MockPlugin() + active = registry.get_active() + assert len(active) == 1 + + registry.disable("mock-plugin") + active = registry.get_active() + assert len(active) == 0 + + def test_get_plugin(self): + from lore.plugin.registry import PluginRegistry + registry = PluginRegistry() + registry._plugins["mock-plugin"] = MockPlugin() + assert registry.get("mock-plugin") is not None + assert registry.get("nonexistent") is None + + def test_cleanup_all(self): + from lore.plugin.registry import PluginRegistry + registry = PluginRegistry() + plugin = MockPlugin() + plugin.cleanup = MagicMock() + registry._plugins["mock-plugin"] = plugin + registry.cleanup_all() + plugin.cleanup.assert_called_once() + + def test_priority_ordering(self): + from lore.plugin.registry import PluginRegistry + + class HighPriorityPlugin(LorePlugin): + meta = PluginMeta(name="high", version="1.0", priority=10) + + class LowPriorityPlugin(LorePlugin): + meta = PluginMeta(name="low", version="1.0", priority=200) + + registry = PluginRegistry() + registry._plugins["low"] = LowPriorityPlugin() + registry._plugins["high"] = HighPriorityPlugin() + + 
active = registry.get_active() + assert active[0].meta.name == "high" + assert active[1].meta.name == "low" + + +class TestPluginHooks: + def test_dispatch_on_remember(self): + from lore.plugin.hooks import dispatch_on_remember + plugin = MockPlugin() + memory = {"id": "test", "content": "hello"} + result = dispatch_on_remember([plugin], memory) + assert result == memory + + def test_dispatch_on_recall(self): + from lore.plugin.hooks import dispatch_on_recall + plugin = MockPlugin() + results = [{"memory": "test"}] + out = dispatch_on_recall([plugin], "query", results) + assert out == results + + def test_error_isolation(self): + from lore.plugin.hooks import dispatch_on_remember + + class FailingPlugin(LorePlugin): + meta = PluginMeta(name="failing", version="1.0") + def on_remember(self, memory): + raise RuntimeError("Plugin crashed!") + + class GoodPlugin(LorePlugin): + meta = PluginMeta(name="good", version="1.0") + def on_remember(self, memory): + memory["processed"] = True + return memory + + memory = {"id": "test"} + # FailingPlugin should not prevent GoodPlugin from running + result = dispatch_on_remember([FailingPlugin(), GoodPlugin()], memory) + assert result.get("processed") is True + + def test_dispatch_on_score(self): + from lore.plugin.hooks import dispatch_on_score + + class BoostPlugin(LorePlugin): + meta = PluginMeta(name="boost", version="1.0") + def on_score(self, memory, score): + return score * 1.5 + + result = dispatch_on_score([BoostPlugin()], {}, 0.8) + assert result == pytest.approx(1.2) + + +class TestPluginScaffold: + def test_scaffold_creates_files(self, tmp_path): + from lore.plugin.scaffold import scaffold_plugin + project_dir = scaffold_plugin("my-tagger", output_dir=str(tmp_path)) + assert project_dir.exists() + assert (project_dir / "pyproject.toml").exists() + assert (project_dir / "my_tagger" / "plugin.py").exists() + assert (project_dir / "my_tagger" / "__init__.py").exists() + assert (project_dir / "tests" / 
"test_my_tagger.py").exists() + + def test_scaffold_content(self, tmp_path): + from lore.plugin.scaffold import scaffold_plugin + project_dir = scaffold_plugin("my-tagger", output_dir=str(tmp_path)) + content = (project_dir / "my_tagger" / "plugin.py").read_text() + assert "class MyTaggerPlugin" in content + assert "LorePlugin" in content + + def test_scaffold_pyproject(self, tmp_path): + from lore.plugin.scaffold import scaffold_plugin + project_dir = scaffold_plugin("my-tagger", output_dir=str(tmp_path)) + content = (project_dir / "pyproject.toml").read_text() + assert "lore-plugin-my-tagger" in content + assert "lore.plugins" in content + + +class TestPluginHarness: + def test_harness_add_memory(self): + from lore.plugin.harness import PluginTestHarness + harness = PluginTestHarness(MockPlugin()) + memory = harness.add_test_memory("test content") + assert memory.content == "test content" + assert len(harness.memories) == 1 + + def test_harness_run_hooks(self): + from lore.plugin.harness import PluginTestHarness + harness = PluginTestHarness(MockPlugin()) + harness.add_test_memory("test content") + results = harness.run_all_hooks() + assert "on_remember" in results + assert "on_recall" in results + assert "on_score" in results diff --git a/tests/test_recommendation_signals.py b/tests/test_recommendation_signals.py new file mode 100644 index 0000000..ad4a2ec --- /dev/null +++ b/tests/test_recommendation_signals.py @@ -0,0 +1,84 @@ +"""Tests for individual recommendation signal extractors (F9).""" + +from __future__ import annotations + +import struct + +import pytest + + +class TestContextSimilarity: + def _make_embedding(self, vec): + return struct.pack(f"{len(vec)}f", *vec) + + def test_identical_vectors(self): + from lore.recommend.signals import context_similarity + vec = [1.0, 0.0, 0.0] + emb = self._make_embedding(vec) + score, _ = context_similarity(vec, emb) + assert score == pytest.approx(1.0, abs=0.01) + + def test_orthogonal_vectors(self): + from 
lore.recommend.signals import context_similarity + vec1 = [1.0, 0.0, 0.0] + vec2 = [0.0, 1.0, 0.0] + emb = self._make_embedding(vec2) + score, _ = context_similarity(vec1, emb) + assert score == pytest.approx(0.0, abs=0.01) + + def test_zero_vector_returns_zero(self): + from lore.recommend.signals import context_similarity + vec = [0.0, 0.0, 0.0] + emb = self._make_embedding(vec) + score, _ = context_similarity([1.0, 0.0, 0.0], emb) + assert score == 0.0 + + +class TestEntityOverlap: + def test_case_insensitive(self): + from lore.recommend.signals import entity_overlap + score, _ = entity_overlap(["Python"], ["python"]) + assert score == 1.0 + + def test_partial_overlap(self): + from lore.recommend.signals import entity_overlap + score, explanation = entity_overlap( + ["python", "docker"], + ["python", "rust"], + ) + assert score == 0.5 + assert "python" in explanation.lower() + + +class TestTemporalPattern: + def test_same_hour(self): + from lore.recommend.signals import temporal_pattern + score, _ = temporal_pattern("2024-06-15T14:00:00+00:00", current_hour=14) + assert score > 0.0 + + def test_midnight_vs_noon(self): + from lore.recommend.signals import temporal_pattern + score, _ = temporal_pattern("2024-06-15T00:00:00+00:00", current_hour=12) + assert score == 0.0 + + def test_invalid_timestamp(self): + from lore.recommend.signals import temporal_pattern + score, _ = temporal_pattern("invalid", current_hour=12) + assert score == 0.0 + + +class TestAccessPattern: + def test_zero_access(self): + from lore.recommend.signals import access_pattern + score, _ = access_pattern(0, None) + assert score == 0.0 + + def test_moderate_access(self): + from lore.recommend.signals import access_pattern + score, _ = access_pattern(10, "2024-01-01T00:00:00") + assert 0.0 < score <= 0.5 + + def test_high_access_capped(self): + from lore.recommend.signals import access_pattern + score, _ = access_pattern(10000, "2024-01-01T00:00:00") + assert score <= 0.5 diff --git 
a/tests/test_recommendations.py b/tests/test_recommendations.py new file mode 100644 index 0000000..847bf96 --- /dev/null +++ b/tests/test_recommendations.py @@ -0,0 +1,139 @@ +"""Tests for Proactive Recommendations (F9).""" + +from __future__ import annotations + +import pytest + + +class TestSignals: + def test_entity_overlap_no_overlap(self): + from lore.recommend.signals import entity_overlap + score, explanation = entity_overlap(["python", "docker"], ["rust", "go"]) + assert score == 0.0 + + def test_entity_overlap_full_overlap(self): + from lore.recommend.signals import entity_overlap + score, explanation = entity_overlap(["python"], ["Python", "Docker"]) + assert score == 1.0 + + def test_entity_overlap_partial(self): + from lore.recommend.signals import entity_overlap + score, explanation = entity_overlap( + ["python", "docker", "postgres"], + ["python", "redis"], + ) + assert 0.0 < score < 1.0 + + def test_entity_overlap_empty_inputs(self): + from lore.recommend.signals import entity_overlap + score, _ = entity_overlap([], ["python"]) + assert score == 0.0 + score, _ = entity_overlap(["python"], []) + assert score == 0.0 + + def test_temporal_pattern_same_hour(self): + from datetime import datetime, timezone + + from lore.recommend.signals import temporal_pattern + now = datetime.now(timezone.utc) + score, _ = temporal_pattern(now.isoformat(), current_hour=now.hour) + assert score > 0.0 + + def test_temporal_pattern_opposite_hour(self): + from lore.recommend.signals import temporal_pattern + score, _ = temporal_pattern("2024-01-01T03:00:00+00:00", current_hour=15) + assert score == 0.0 + + def test_access_pattern_never_accessed(self): + from lore.recommend.signals import access_pattern + score, _ = access_pattern(0, None) + assert score == 0.0 + + def test_access_pattern_high_count(self): + from lore.recommend.signals import access_pattern + score, explanation = access_pattern(50, "2024-01-01T00:00:00") + assert score > 0.0 + assert "50" in explanation + + 
class TestExplainer:
    """Tests for turning scored signals into a human-readable explanation."""

    def test_explain_with_signals(self):
        from lore.recommend.explainer import explain
        from lore.recommend.types import RecommendationSignal

        scored = [
            RecommendationSignal("context", 0.9, 0.4, "High context match"),
            RecommendationSignal("entity", 0.5, 0.25, "Shared entities: docker"),
            RecommendationSignal("temporal", 0.1, 0.1, "Similar time"),
        ]
        text = explain(scored)
        assert "Suggested because:" in text
        assert "High context match" in text

    def test_explain_empty_signals(self):
        from lore.recommend.explainer import explain

        assert "No strong signals" in explain([])

    def test_explain_zero_scores(self):
        from lore.recommend.explainer import explain
        from lore.recommend.types import RecommendationSignal

        text = explain([RecommendationSignal("test", 0.0, 0.5, "Nothing")])
        assert "Weak match" in text


class TestFeedbackRecorder:
    """Tests for recording feedback and deriving weight adjustments."""

    def test_record_positive(self):
        from lore.recommend.feedback import FeedbackRecorder

        fr = FeedbackRecorder()
        fr.record("mem-1", "positive", "user-1")
        assert fr.get_weight_adjustment("user-1", "manual") > 0

    def test_record_negative(self):
        from lore.recommend.feedback import FeedbackRecorder

        fr = FeedbackRecorder()
        fr.record("mem-1", "negative", "user-1")
        assert fr.get_weight_adjustment("user-1", "manual") < 0

    def test_invalid_feedback_raises(self):
        from lore.recommend.feedback import FeedbackRecorder

        with pytest.raises(ValueError):
            FeedbackRecorder().record("mem-1", "invalid", "user-1")

    def test_weight_clamping(self):
        from lore.recommend.feedback import FeedbackRecorder

        fr = FeedbackRecorder()
        # Pile on positive feedback; the adjustment must stay capped.
        for _ in range(100):
            fr.record("mem-1", "positive", "user-1")
        assert fr.get_weight_adjustment("user-1", "manual") <= 0.5


class TestRecommendationTypes:
    """Smoke tests for the recommendation dataclasses."""

    def test_recommendation_dataclass(self):
        from lore.recommend.types import Recommendation

        rec = Recommendation(
            memory_id="test",
            content_preview="Docker setup...",
            score=0.85,
            explanation="High relevance",
        )
        assert rec.score == 0.85
        assert rec.signals == []  # default: no contributing signals

    def test_signal_dataclass(self):
        from lore.recommend.types import RecommendationSignal

        sig = RecommendationSignal(
            name="context_similarity",
            score=0.9,
            weight=0.4,
            explanation="High similarity",
        )
        # Weighted contribution of a signal is score * weight.
        assert sig.score * sig.weight == pytest.approx(0.36)


# --- diff: new file tests/test_retrieval_profiles.py (hunk @@ -0,0 +1,63 @@) ---
# Original file header: """Tests for Retrieval Profiles (F4).""" plus
# `from __future__ import annotations` (demoted to a comment here; a future
# import is only legal at the very top of a module).


class TestProfileCache:
    """Tests for the in-process profile cache and its TTL behavior."""

    def test_get_cached_returns_none_when_empty(self):
        from lore.server.routes.profiles import _get_cached_profile, _profile_cache

        _profile_cache.clear()
        assert _get_cached_profile("nonexistent") is None

    def test_set_and_get_cached(self):
        from lore.server.routes.profiles import (
            _get_cached_profile,
            _profile_cache,
            _set_cached_profile,
        )

        _profile_cache.clear()
        _set_cached_profile("org:test", {"name": "test", "semantic_weight": 1.0})
        hit = _get_cached_profile("org:test")
        assert hit is not None
        assert hit["name"] == "test"

    def test_cache_ttl_expires(self):
        import time as _time

        from lore.server.routes.profiles import (
            _PROFILE_CACHE_TTL,
            _get_cached_profile,
            _profile_cache,
            _set_cached_profile,
        )

        _profile_cache.clear()
        _set_cached_profile("org:expire", {"name": "expire"})
        # Rewind the stored timestamp so the entry is already past its TTL.
        key = "org:expire"
        stored, _ = _profile_cache[key]
        _profile_cache[key] = (stored, _time.monotonic() - _PROFILE_CACHE_TTL - 1)
        assert _get_cached_profile(key) is None


class TestProfileModels:
    """Tests for the retrieval-profile request/response models."""

    def test_create_request_defaults(self):
        from lore.server.routes.profiles import ProfileCreateRequest

        req = ProfileCreateRequest(name="test")
        assert req.semantic_weight == 1.0
        assert req.graph_weight == 1.0
        assert req.recency_bias == 30.0
        assert req.min_score == 0.3
        assert req.max_results == 10

    def test_response_model(self):
        from lore.server.routes.profiles import ProfileResponse

        resp = ProfileResponse(
            id="test-id",
            org_id="org-1",
            name="fast-coding",
            semantic_weight=1.0,
            graph_weight=0.5,
            recency_bias=7.0,
            min_score=0.4,
            max_results=10,
            is_preset=False,
        )
        assert resp.name == "fast-coding"
        assert resp.recency_bias == 7.0


# --- diff: new file tests/test_setup_wizard.py (hunk @@ -0,0 +1,133 @@) ---
# Original file header: """Tests for enhanced setup wizard (F2).""" plus
# `from __future__ import annotations` (see note above).

import stat
from unittest.mock import MagicMock, patch


class TestBackupConfig:
    """Tests for config backup creation and pruning."""

    def test_creates_backup(self, tmp_path):
        from lore.setup import _backup_config

        cfg = tmp_path / "config.json"
        cfg.write_text('{"hooks": {}}')

        copy = _backup_config(cfg)
        assert copy is not None
        assert copy.exists()
        assert "lore-backup" in copy.name
        assert copy.read_text() == '{"hooks": {}}'

    def test_returns_none_for_missing_file(self, tmp_path):
        from lore.setup import _backup_config

        assert _backup_config(tmp_path / "nonexistent.json") is None

    def test_prunes_old_backups(self, tmp_path):
        from lore.setup import _backup_config

        cfg = tmp_path / "config.json"
        cfg.write_text("v1")

        # Create 4 backups; pruning should keep at most three around.
        for i in range(4):
            cfg.write_text(f"v{i+2}")
            _backup_config(cfg)

        remaining = list(tmp_path.glob("config.json.lore-backup.*"))
        assert len(remaining) <= 3


class TestValidateHook:
    """Tests for hook-script validation."""

    def test_validates_existing_hook(self, tmp_path):
        from lore.setup import _validate_hook

        hook = tmp_path / "test-hook.sh"
        hook.write_text("#!/bin/bash\necho hello\n")
        hook.chmod(hook.stat().st_mode | stat.S_IEXEC)

        assert not _validate_hook(hook)

    def test_reports_missing_hook(self, tmp_path):
        from lore.setup import _validate_hook

        problems = _validate_hook(tmp_path / "missing.sh")
        assert len(problems) == 1
        assert "does not exist" in problems[0]

    def test_reports_not_executable(self, tmp_path):
        from lore.setup import _validate_hook

        hook = tmp_path / "no-exec.sh"
        hook.write_text("#!/bin/bash\necho hello\n")
        hook.chmod(0o644)  # no execute permission

        assert any("not executable" in msg for msg in _validate_hook(hook))


class TestValidateConfig:
    """Tests for settings-file validation."""

    def test_valid_json_config(self, tmp_path):
        from lore.setup import _validate_config

        cfg = tmp_path / "config.json"
        cfg.write_text('{"hooks": {"test": []}}')

        assert not _validate_config(cfg, "claude-code")

    def test_invalid_json(self, tmp_path):
        from lore.setup import _validate_config

        cfg = tmp_path / "config.json"
        cfg.write_text("{bad json")

        assert any("Invalid JSON" in msg for msg in _validate_config(cfg, "claude-code"))

    def test_missing_hooks_key(self, tmp_path):
        from lore.setup import _validate_config

        cfg = tmp_path / "config.json"
        cfg.write_text('{"other": "value"}')

        assert any("hooks" in msg for msg in _validate_config(cfg, "claude-code"))


class TestTestConnection:
    """Tests for the server connectivity probe."""

    def test_unreachable_server(self):
        from lore.setup import _test_connection

        # Assumes nothing is listening on port 19999 on the test host.
        report = _test_connection("http://localhost:19999")
        assert report["status"] == "unreachable"
        assert report["health"] is False

    def test_successful_connection(self):
        from lore.setup import _test_connection

        with patch("urllib.request.urlopen") as fake_urlopen:
            resp = MagicMock()
            resp.status = 200
            resp.__enter__ = MagicMock(return_value=resp)
            resp.__exit__ = MagicMock(return_value=False)
            fake_urlopen.return_value = resp

            report = _test_connection("http://localhost:8765", "lore_sk_test")
            assert report["status"] == "ok"
            assert report["health"] is True


class TestSetupCLI:
    """Tests for the `setup` CLI command and its flags."""

    def test_setup_dry_run(self, capsys):
        from lore.cli import build_parser, cmd_setup

        args = build_parser().parse_args(["setup", "claude-code", "--dry-run"])
        cmd_setup(args)
        assert "dry-run" in capsys.readouterr().out.lower()

    def test_setup_parser_has_new_flags(self):
        from lore.cli import build_parser

        args = build_parser().parse_args([
            "setup", "claude-code",
            "--validate", "--test-connection", "--dry-run",
        ])
        assert args.validate is True
        assert args.test_connection is True
        assert args.setup_dry_run is True


# --- diff: new file tests/test_slo.py (hunk @@ -0,0 +1,189 @@) ---
# Original file header: """Tests for SLO Dashboard (F3) — threshold evaluation
# and alert dispatch.""" plus `from __future__ import annotations` (see note
# above).

from unittest.mock import MagicMock, patch

import pytest


class TestThresholdEvaluation:
    """Tests for comparing a metric value against an SLO threshold."""

    def test_lt_passing(self):
        from lore.server.routes.slo import _check_threshold

        assert _check_threshold(45.0, "lt", 50.0) is True

    def test_lt_failing(self):
        from lore.server.routes.slo import _check_threshold

        assert _check_threshold(55.0, "lt", 50.0) is False

    def test_gt_passing(self):
        from lore.server.routes.slo import _check_threshold

        assert _check_threshold(0.95, "gt", 0.90) is True

    def test_gt_failing(self):
        from lore.server.routes.slo import _check_threshold

        assert _check_threshold(0.85, "gt", 0.90) is False

    def test_none_value_passes(self):
        from lore.server.routes.slo import _check_threshold

        # No data is treated as no violation.
        assert _check_threshold(None, "lt", 50.0) is True

    def test_equal_value_lt(self):
        from lore.server.routes.slo import _check_threshold

        # Equal is NOT less than, so it should fail
        assert _check_threshold(50.0, "lt", 50.0) is False

    def test_equal_value_gt(self):
        from lore.server.routes.slo import _check_threshold

        # Equal is NOT greater than, so it should fail
        assert _check_threshold(50.0, "gt", 50.0) is False


class TestMetricSql:
    """Tests for the SQL fragment generated per metric."""

    def test_p50_latency(self):
        from lore.server.routes.slo import _metric_sql

        fragment = _metric_sql("p50_latency")
        assert "percentile_cont(0.50)" in fragment
        assert "AS value" in fragment

    def test_p99_latency(self):
        from lore.server.routes.slo import _metric_sql

        assert "percentile_cont(0.99)" in _metric_sql("p99_latency")

    def test_hit_rate(self):
        from lore.server.routes.slo import _metric_sql

        assert "results_count > 0" in _metric_sql("hit_rate")

    def test_invalid_metric_raises(self):
        from lore.server.routes.slo import _metric_sql

        with pytest.raises(KeyError):
            _metric_sql("invalid_metric")


class TestValidMetrics:
    """Tests that the allowed metric/operator sets are populated."""

    def test_valid_metrics(self):
        from lore.server.routes.slo import VALID_METRICS

        for metric in ("p50_latency", "p95_latency", "p99_latency", "hit_rate"):
            assert metric in VALID_METRICS

    def test_valid_operators(self):
        from lore.server.routes.slo import VALID_OPERATORS

        assert "lt" in VALID_OPERATORS
        assert "gt" in VALID_OPERATORS


class TestAlertChannels:
    """Tests for the webhook and email alert channels."""

    def test_webhook_channel_init(self):
        from lore.server.alerting import WebhookChannel

        channel = WebhookChannel("https://example.com/webhook")
        assert channel.url == "https://example.com/webhook"

    def test_email_channel_init(self):
        from lore.server.alerting import EmailChannel

        channel = EmailChannel("test@example.com")
        assert channel.to_addr == "test@example.com"

    @pytest.mark.asyncio
    async def test_webhook_send_with_mock(self):
        from lore.server.alerting import WebhookChannel

        channel = WebhookChannel("https://example.com/webhook")
        payload = {"slo_name": "test", "metric": "p99_latency", "value": 55.0}

        # Mock urllib since httpx may not be installed
        with patch("urllib.request.urlopen") as fake_urlopen:
            resp = MagicMock()
            resp.status = 200
            resp.__enter__ = MagicMock(return_value=resp)
            resp.__exit__ = MagicMock(return_value=False)
            fake_urlopen.return_value = resp

            # Force ImportError for httpx to use urllib fallback
            import sys

            saved = sys.modules.get("httpx")
            sys.modules["httpx"] = None
            try:
                result = await channel.send(payload)
            finally:
                if saved is not None:
                    sys.modules["httpx"] = saved
                else:
                    sys.modules.pop("httpx", None)
        assert result is True

    @pytest.mark.asyncio
    async def test_email_send_without_smtp_config(self):
        from lore.server.alerting import EmailChannel

        channel = EmailChannel("test@example.com", smtp_host="")
        result = await channel.send({"slo_name": "test"})
        assert result is False


class TestSloResponse:
    """Tests for the SLO API request/response models."""

    def test_slo_response_model(self):
        from lore.server.routes.slo import SloResponse

        slo = SloResponse(
            id="test-id",
            org_id="org-1",
            name="P99 < 50ms",
            metric="p99_latency",
            operator="lt",
            threshold=50.0,
            window_minutes=60,
            enabled=True,
        )
        assert slo.name == "P99 < 50ms"
        assert slo.threshold == 50.0

    def test_slo_status_response(self):
        from lore.server.routes.slo import SloStatusResponse

        status = SloStatusResponse(
            id="test-id",
            name="P99 < 50ms",
            metric="p99_latency",
            threshold=50.0,
            operator="lt",
            current_value=45.0,
            passing=True,
        )
        assert status.passing is True

    def test_slo_create_request(self):
        from lore.server.routes.slo import SloCreateRequest

        req = SloCreateRequest(
            name="Hit Rate > 90%",
            metric="hit_rate",
            operator="gt",
            threshold=0.90,
        )
        assert req.metric == "hit_rate"
        assert req.window_minutes == 60  # default


class TestSloCLI:
    """Tests for the `slo` CLI subcommands."""

    def test_slo_subparser_exists(self):
        from lore.cli import build_parser

        args = build_parser().parse_args(["slo", "list"])
        assert args.command == "slo"
        assert args.slo_command == "list"

    def test_slo_create_args(self):
        from lore.cli import build_parser

        args = build_parser().parse_args([
            "slo", "create",
            "--name", "P99 < 50ms",
            "--metric", "p99_latency",
            "--threshold", "50",
            "--operator", "lt",
        ])
        assert args.slo_name == "P99 < 50ms"
        assert args.metric == "p99_latency"
        assert args.threshold == 50.0


# --- diff: new file tests/test_workspaces.py (hunk @@ -0,0 +1,51 @@) ---
# Original file header: """Tests for Workspaces + RBAC (F7).""" plus
# `from __future__ import annotations` (see note above).


class TestWorkspaceModels:
    """Tests for workspace and membership request/response models."""

    def test_workspace_create_request(self):
        from lore.server.routes.workspaces import WorkspaceCreateRequest

        req = WorkspaceCreateRequest(name="Dev Team", slug="dev-team")
        assert req.name == "Dev Team"
        assert req.slug == "dev-team"
        assert req.settings == {}

    def test_workspace_response(self):
        from lore.server.routes.workspaces import WorkspaceResponse

        resp = WorkspaceResponse(
            id="ws-1", org_id="org-1",
            name="Dev Team", slug="dev-team",
        )
        assert resp.slug == "dev-team"

    def test_member_add_request(self):
        from lore.server.routes.workspaces import MemberAddRequest

        assert MemberAddRequest(user_id="user-1").role == "writer"  # default

    def test_member_response(self):
        from lore.server.routes.workspaces import MemberResponse

        resp = MemberResponse(
            id="mem-1", workspace_id="ws-1",
            user_id="user-1", role="admin",
        )
        assert resp.role == "admin"


class TestAuditModels:
    """Tests for audit-log models and writer entry points."""

    def test_audit_entry(self):
        from lore.server.routes.audit import AuditEntry

        entry = AuditEntry(
            id=1, org_id="org-1",
            actor_id="key-1", actor_type="api_key",
            action="memory.create",
        )
        assert entry.action == "memory.create"
        assert entry.metadata == {}

    def test_audit_writer_import(self):
        from lore.server.audit import fire_audit_log, write_audit_log

        # Just verify imports work
        assert callable(write_audit_log)
        assert callable(fire_audit_log)