diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index b29bffbc..ef15af00 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -130,7 +130,14 @@ npm run build:frontend && npm run copy:frontend && npm run dev:backend ### Database Schema -SQLite schema defined in `backend/src/database/schema.sql`, migrations in `migrations.sql`. Key tables: +SQLite schema uses a pure migration-first approach: + +- All schema definitions are in numbered migrations: `backend/src/database/migrations/*.sql` +- Migration 000: Initial schema (executions, revoked_tokens) +- Migration 001: RBAC tables (users, roles, permissions, groups) +- Future changes: Always create a new numbered migration, never modify existing ones + +Key tables: - `executions`: Stores all command/task execution history with results - Auto-create on first run via `DatabaseService` diff --git a/.kiro/analysis/ManageTab_Lifecycle_Flow.md b/.kiro/analysis/ManageTab_Lifecycle_Flow.md new file mode 100644 index 00000000..1cfd840d --- /dev/null +++ b/.kiro/analysis/ManageTab_Lifecycle_Flow.md @@ -0,0 +1,234 @@ +# ManageTab Lifecycle Actions - End-to-End Flow Analysis + +## Error Context + +``` +Error: "No provisioning provider found for node ID: almalinux10.test.example42.com" +Debug: nodeType=unknown, currentStatus=unknown, provider=undefined, + providerData.id=undefined, metadata.type=undefined, metadata.status=undefined +``` + +## Complete Flow Trace + +### 1. Frontend API Client (frontend/src/lib/api.ts) + +Function: fetchLifecycleActions(nodeId: string) + +- Location: Lines ~1100-1110 (in provisioning API section) +- Endpoint: GET /api/nodes/{nodeId}/lifecycle-actions +- Retry Logic: 2 retries with 1000ms delay +- Returns: { provider: string; actions: LifecycleAction[] } + +### 2. 
ManageTab Component (frontend/src/components/ManageTab.svelte) + +Props Received: + +- nodeId: string - The node identifier (e.g., "proxmox:node:vmid" or "aws:region:instanceId") +- nodeType?: 'vm' | 'lxc' | 'unknown' - Type of node (defaults to 'unknown') +- currentStatus?: string - Current node status (defaults to 'unknown') +- onStatusChange?: () => void - Callback when status changes + +Key Function: fetchAvailableActions() + +- Calls fetchLifecycleActions(nodeId) +- Sets provider and availableActions from response +- Problem: ManageTab receives nodeId but doesn't validate it has the correct format + +### 3. NodeDetailPage (frontend/src/pages/NodeDetailPage.svelte) + +Where ManageTab is Used: In the "manage" tab (line ~1800+) + +- nodeId comes from route params: params?.id || '' +- node is fetched via GET /api/nodes/{nodeId} +- Node data should contain provider metadata, but it's not being extracted + +Issue: The node detail page fetches node data but doesn't extract or pass provider information to ManageTab. The nodeId passed to ManageTab is the raw route parameter, which may be a hostname instead of a provider-prefixed ID. + +### 4. Backend Route Handler (backend/src/routes/inventory.ts) + +Endpoint: GET /api/nodes/:id/lifecycle-actions + +- Location: Lines ~1100-1150 +- Authentication: Required (via authMiddleware) +- RBAC: Required (provisioning:read permission) + +Handler Logic: + +1. Parse nodeId from params +2. Call getExecutionToolForNode(nodeId, res) +3. Get capabilities from tool +4. Build lifecycle action definitions +5. Return { provider, actions } + +### 5. 
Provider Resolution (backend/src/routes/inventory.ts) + +Function: resolveProvider(nodeId: string): string | null + +- Location: Lines ~1050-1060 +- Logic: Extracts prefix from nodeId (before first colon) +- Supported Prefixes: "proxmox", "aws" + +Function: getExecutionToolForNode(nodeId: string, res: Response) + +- Location: Lines ~1065-1090 +- Returns: { tool: ExecutionToolPlugin; provider: string } | null +- Error Cases: + 1. Integration manager not initialized → 503 "INTEGRATION_NOT_AVAILABLE" + 2. Provider prefix not recognized → 400 "UNSUPPORTED_PROVIDER" ← THIS IS THE ERROR + 3. Provider not configured → 503 "PROVIDER_NOT_CONFIGURED" + +### 6. Node Data Fetching (backend/src/routes/inventory.ts) + +Endpoint: GET /api/nodes/:id + +- Location: Lines ~800-950 +- Returns: Node object with metadata + +Problem: The node endpoint returns node data but doesn't include provider information. The frontend receives a node object without the provider-prefixed ID format needed by the lifecycle actions endpoint. + +## Root Cause Analysis + +Why the Error Occurs: + +1. Node ID Format Mismatch: + - Frontend receives nodeId from route params (e.g., "almalinux10.test.example42.com") + - This is a hostname, not a provider-prefixed ID (e.g., "proxmox:node:vmid") + - Backend's resolveProvider() looks for a colon prefix and finds none + - Returns null, triggering the error + +2. Missing Provider Metadata: + - Node data fetched from GET /api/nodes/:id doesn't include provider information + - Frontend can't determine which provider manages the node + - ManageTab receives nodeId as a plain hostname + +3. 
No Node Linking: + - The system has a NodeLinkingService (mentioned in structure) that should map nodes across integrations + - But the node detail page doesn't use it to resolve provider information + - Frontend doesn't know which integration owns the node + +## Data Flow Diagram + +Frontend Route Params + ↓ + nodeId = "almalinux10.test.example42.com" (hostname) + ↓ +NodeDetailPage + ↓ + Fetches: GET /api/nodes/{nodeId} + ↓ + Returns: { id, name, uri, transport, config } + ↓ + Passes to ManageTab: nodeId (still hostname), nodeType='unknown', currentStatus='unknown' + ↓ +ManageTab.fetchAvailableActions() + ↓ + Calls: GET /api/nodes/{nodeId}/lifecycle-actions + ↓ +Backend resolveProvider() + ↓ + Looks for prefix before colon in "almalinux10.test.example42.com" + ↓ + Finds: "almalinux10" (not a valid provider) + ↓ + Returns: null + ↓ + Error: "No provisioning provider found for node ID: almalinux10.test.example42.com" + +## What Should Happen + +1. Node Linking: When fetching node details, the backend should: + - Identify which integration(s) manage this node + - Return provider information in the node object + - Include provider-prefixed node ID for provisioning operations + +2. Frontend Extraction: NodeDetailPage should: + - Extract provider information from node data + - Pass provider-prefixed nodeId to ManageTab + - Pass actual nodeType and currentStatus from node metadata + +3. 
ManageTab Usage: ManageTab should: + - Receive provider-prefixed nodeId (e.g., "proxmox:node:vmid") + - Validate nodeId format before calling API + - Show appropriate error if nodeId format is invalid + +## Missing Pieces + +In Backend: + +- Node endpoint doesn't return provider information +- No mechanism to link a hostname to a provider-prefixed ID +- NodeLinkingService exists but isn't used in node detail endpoint + +In Frontend: + +- NodeDetailPage doesn't extract provider metadata +- ManageTab doesn't validate nodeId format +- No fallback for nodes without provider information + +## Related Code Locations + +- Frontend API: pabawi/frontend/src/lib/api.ts (lines ~1100-1110) +- ManageTab Component: pabawi/frontend/src/components/ManageTab.svelte (entire file) +- NodeDetailPage: pabawi/frontend/src/pages/NodeDetailPage.svelte (lines ~1-50, ~1800+) +- Backend Route: pabawi/backend/src/routes/inventory.ts (lines ~800-1150) +- Provider Resolution: pabawi/backend/src/routes/inventory.ts (lines ~1050-1090) +- Integration Manager: pabawi/backend/src/integrations/IntegrationManager.ts +- Node Linking Service: pabawi/backend/src/integrations/NodeLinkingService.ts + +## Lifecycle Actions Endpoint Details + +GET /api/nodes/:id/lifecycle-actions + +Request: +GET /api/nodes/proxmox:node:100/lifecycle-actions +Authorization: Bearer {token} + +Success Response (200): +{ + "provider": "proxmox", + "actions": [ + { + "name": "start", + "displayName": "Start", + "description": "Start the VM", + "requiresConfirmation": false, + "destructive": false, + "availableWhen": ["stopped"] + }, + { + "name": "stop", + "displayName": "Stop", + "description": "Stop the VM", + "requiresConfirmation": false, + "destructive": false, + "availableWhen": ["running"] + }, + { + "name": "destroy", + "displayName": "Destroy", + "description": "Permanently delete the VM", + "requiresConfirmation": true, + "destructive": true, + "availableWhen": ["stopped", "running", "suspended", "unknown"] + } + ] +} 
+ +Error Response (400): +{ + "error": { + "code": "UNSUPPORTED_PROVIDER", + "message": "No provisioning provider found for node ID: almalinux10.test.example42.com" + } +} + +## Summary + +The error occurs because: + +1. Frontend passes a hostname as nodeId to ManageTab +2. Backend expects a provider-prefixed ID (e.g., "proxmox:node:vmid") +3. Provider resolution fails because hostname doesn't have a colon prefix +4. Backend returns 400 error with the message shown + +Fix Required: Backend node endpoint must return provider information so frontend can construct the correct provider-prefixed nodeId for lifecycle actions. diff --git a/.kiro/database-cleanup-prompt.md b/.kiro/database-cleanup-prompt.md new file mode 100644 index 00000000..f1a8a3e5 --- /dev/null +++ b/.kiro/database-cleanup-prompt.md @@ -0,0 +1,149 @@ +# Database Schema Cleanup - New Conversation Prompt + +## Task Overview + +Review and clean up the Pabawi database schema structure to eliminate duplicates, remove orphaned files, and establish a clear, maintainable approach for database schema management. + +## Background + +The project uses SQLite with a hybrid approach: base schema files + migration system. This has led to duplicate definitions and orphaned files that need cleanup. 
+ +## Current File Structure + +``` +backend/src/database/ +├── schema.sql # Base schema: executions table +├── rbac-schema.sql # Base schema: RBAC tables (DUPLICATE of 001) +├── audit-schema.sql # ⚠️ ORPHANED - never loaded, duplicates 004 +├── migrations.sql # ⚠️ ORPHANED - never loaded, superseded +├── DatabaseService.ts # Loads schema.sql + rbac-schema.sql, then runs migrations +├── MigrationRunner.ts # Runs numbered migrations from migrations/ +└── migrations/ + ├── 001_initial_rbac.sql # Creates RBAC tables (duplicates rbac-schema.sql) + ├── 002_seed_rbac_data.sql # Seeds roles, permissions, config + ├── 003_failed_login_attempts.sql # Adds security tables + ├── 004_audit_logging.sql # Adds audit_logs (duplicates audit-schema.sql) + ├── 005_add_ssh_execution_tool.sql # Updates executions table + └── 006_add_batch_executions.sql # Adds batch support +``` + +## Key Files to Examine + +Please read these files to understand the current state: + +1. **Initialization Logic:** + - `backend/src/database/DatabaseService.ts` - How schemas are loaded + - `backend/src/database/MigrationRunner.ts` - How migrations run + +2. **Base Schemas:** + - `backend/src/database/schema.sql` - Executions table + - `backend/src/database/rbac-schema.sql` - RBAC tables + +3. **Orphaned Files (candidates for deletion):** + - `backend/src/database/audit-schema.sql` - Never loaded, duplicates migration 004 + - `backend/src/database/migrations.sql` - Never loaded, superseded by schema.sql + +4. **All Migrations:** + - `backend/src/database/migrations/001_initial_rbac.sql` through `006_add_batch_executions.sql` + +## Problems to Address + +### 1. Duplicate Definitions + +- `rbac-schema.sql` has identical content to `001_initial_rbac.sql` +- `audit-schema.sql` has identical content to `004_audit_logging.sql` +- Both use `CREATE TABLE IF NOT EXISTS` so they don't conflict, but it's confusing + +### 2. 
Orphaned Files + +- `audit-schema.sql` exists but is never loaded by DatabaseService.ts +- `migrations.sql` exists but is never loaded by DatabaseService.ts +- These should probably be deleted + +### 3. Unclear Pattern + +- Some tables created via base schemas (executions, RBAC) +- Other tables created via migrations (audit_logs, failed_login_attempts, batch_executions) +- No documented reason for the difference + +## Current Initialization Flow + +For a **new database**: + +1. DatabaseService loads `schema.sql` → creates executions table +2. DatabaseService loads `rbac-schema.sql` → creates RBAC tables +3. MigrationRunner runs pending migrations: + - 001: Tries to create RBAC tables (already exist, skipped due to IF NOT EXISTS) + - 002: Seeds default data + - 003: Creates failed_login_attempts tables + - 004: Tries to create audit_logs (doesn't exist yet, gets created) + - 005: Updates executions table + - 006: Creates batch_executions, updates executions + +For an **existing database**: + +1. Base schemas already applied (no-op due to IF NOT EXISTS) +2. 
MigrationRunner runs only new migrations since last run + +## Decision Points + +### Option A: Migration-First Approach (Clean, Standard) + +- Move ALL schema definitions to migrations +- Delete base schema files (schema.sql, rbac-schema.sql) +- Update DatabaseService to only run MigrationRunner +- Create migration 000 for initial executions table + +**Pros:** Single source of truth, standard approach, clear history +**Cons:** Requires more refactoring, slower initial setup + +### Option B: Keep Hybrid Approach (Current, Less Work) + +- Keep schema.sql and rbac-schema.sql for initial setup +- Accept that migrations 001 and 004 duplicate base schemas +- Delete only the orphaned files (audit-schema.sql, migrations.sql) +- Document the approach clearly + +**Pros:** Less refactoring, faster new DB setup +**Cons:** Duplicate definitions remain, less standard + +## Recommended Actions + +### Immediate (Safe, Low Risk) + +1. Delete `backend/src/database/audit-schema.sql` - never loaded, duplicates migration 004 +2. Delete `backend/src/database/migrations.sql` - never loaded, superseded +3. Add comments to DatabaseService.ts explaining the hybrid approach +4. Add comments to schema files noting their relationship to migrations + +### Optional (Requires Decision) + +1. Choose between Option A (migration-first) or Option B (hybrid) +2. If Option A: Refactor to pure migration approach +3. If Option B: Document and accept the hybrid approach +4. Update developer documentation with schema change policy + +## Testing Requirements + +After any changes: + +- ✅ Test fresh database initialization (no existing pabawi.db) +- ✅ Test migration from previous versions +- ✅ Verify all tables created correctly +- ✅ Run existing database tests +- ✅ Test Docker deployment with clean database +- ✅ Verify setup wizard works + +## Questions to Answer + +1. **Which approach should we use going forward?** Migration-first +2. 
**Are the orphaned files safe to delete?** Check for any hidden reference YES +3. **Should we keep duplicate migrations?** (001 and 004 duplicate base schemas) NO +4. **What's the policy for future changes?** Always use migrations? YES + +## Success Criteria + +- ✅ No orphaned files in the database directory +- ✅ Clear documentation of the schema management approach +- ✅ All tests pass +- ✅ Docker deployment works with cl diff --git a/.kiro/todo/batch-execution-missing-action-execution.md b/.kiro/done/batch-execution-missing-action-execution.md similarity index 100% rename from .kiro/todo/batch-execution-missing-action-execution.md rename to .kiro/done/batch-execution-missing-action-execution.md diff --git a/.kiro/done/database-schema-cleanup-task.md b/.kiro/done/database-schema-cleanup-task.md new file mode 100644 index 00000000..6e3c114f --- /dev/null +++ b/.kiro/done/database-schema-cleanup-task.md @@ -0,0 +1,275 @@ +# Database Schema Cleanup and Consolidation Task + +## Status: ✅ COMPLETED - Migration-First Approach + +All base schema files have been deleted and converted to migrations. The database now uses a pure migration-first approach. + +## Context + +The Pabawi project has database schema definitions in multiple places with overlapping content, creating maintenance issues and potential inconsistencies. This needs to be reviewed and cleaned up to follow a clear migration-based approach. + +## Current State Analysis + +### Files in `backend/src/database/` + +1. **schema.sql** - Base schema for executions table and revoked_tokens + - Contains: executions table, indexes, revoked_tokens table + - Used by: DatabaseService.ts (loaded first) + - Status: ✅ Currently used + +2. **rbac-schema.sql** - RBAC tables (users, roles, permissions, groups) + - Contains: Complete RBAC system tables and indexes + - Used by: DatabaseService.ts (loaded second) + - Status: ✅ Currently used + - **DUPLICATE**: Identical content exists in `migrations/001_initial_rbac.sql` + +3. 
**audit-schema.sql** - Audit logging tables + - Contains: audit_logs table and indexes + - Used by: NOT referenced in DatabaseService.ts + - Status: ⚠️ NOT LOADED - appears to be orphaned + - **DUPLICATE**: Identical content exists in `migrations/004_audit_logging.sql` + +4. **migrations.sql** - Legacy migration file with ALTER TABLE statements + - Contains: Column additions to executions table (command, expert_mode, original_execution_id, re_execution_count, stdout, stderr, execution_tool) + - Used by: NOT referenced in DatabaseService.ts + - Status: ⚠️ NOT LOADED - appears to be orphaned + - **SUPERSEDED**: These changes are now in schema.sql and handled by structured migrations + +### Files in `backend/src/database/migrations/` + +1. **001_initial_rbac.sql** - Creates RBAC tables + - Identical to rbac-schema.sql + - Properly tracked by MigrationRunner + +2. **002_seed_rbac_data.sql** - Seeds default roles, permissions, config + - Inserts default data (roles, permissions, config) + - Properly tracked by MigrationRunner + +3. **003_failed_login_attempts.sql** - Adds security tables + - Creates failed_login_attempts and account_lockouts tables + - Properly tracked by MigrationRunner + +4. **004_audit_logging.sql** - Adds audit logging + - Identical to audit-schema.sql + - Properly tracked by MigrationRunner + +5. **005_add_ssh_execution_tool.sql** - Updates executions table + - Recreates executions table with SSH support + - Properly tracked by MigrationRunner + +6. **006_add_batch_executions.sql** - Adds batch execution support + - Creates batch_executions table + - Adds batch_id and batch_position to executions + - Properly tracked by MigrationRunner + +## Current Database Initialization Flow + +From `DatabaseService.ts`: + +```typescript +1. Load and execute schema.sql (executions table) +2. Load and execute rbac-schema.sql (RBAC tables) +3. 
Run MigrationRunner.runPendingMigrations() + - Checks migrations table for applied migrations + - Runs any pending migrations from migrations/ directory +``` + +## Problems Identified + +### 1. Duplicate Definitions + +- `rbac-schema.sql` duplicates `001_initial_rbac.sql` +- `audit-schema.sql` duplicates `004_audit_logging.sql` +- Both base schemas AND migrations create the same tables + +### 2. Orphaned Files + +- `audit-schema.sql` is never loaded by DatabaseService +- `migrations.sql` is never loaded by DatabaseService +- These files exist but serve no purpose + +### 3. Inconsistent Approach + +- Some tables created via base schema files (executions, RBAC) +- Other tables created via migrations (audit_logs, failed_login_attempts, batch_executions) +- No clear pattern for when to use which approach + +### 4. Migration Confusion + +- For new databases: Base schemas create tables, then migrations run (but tables already exist due to CREATE IF NOT EXISTS) +- For existing databases: Migrations properly add new tables/columns +- This works but is confusing and error-prone + +## Recommended Approach + +### Option A: Migration-First (Recommended) + +Move all schema definitions to migrations, use base schemas only for the absolute minimum. + +**Pros:** + +- Single source of truth for all schema changes +- Clear history of database evolution +- Standard approach used by most frameworks +- Easy to understand and maintain + +**Cons:** + +- Requires refactoring existing code +- Need to ensure migration 001 creates ALL initial tables + +### Option B: Base Schema + Migrations (Current Hybrid) + +Keep base schemas for initial tables, use migrations only for changes. 
+ +**Pros:** + +- Less refactoring needed +- Faster initial setup (no migration runner needed for new DBs) + +**Cons:** + +- Duplicate definitions between base schemas and migrations +- Confusing which file is the source of truth +- Current state has orphaned files + +## Recommended Actions + +### Phase 1: Immediate Cleanup (Remove Duplicates) + +1. **Delete orphaned files:** + - Delete `backend/src/database/audit-schema.sql` (duplicates migration 004) + - Delete `backend/src/database/migrations.sql` (superseded by schema.sql + migrations) + +2. **Update DatabaseService.ts:** + - Remove code that tries to load audit-schema.sql (if any) + - Verify rbac-schema.sql loading is still needed + +3. **Document the approach:** + - Add comments explaining why rbac-schema.sql exists alongside 001_initial_rbac.sql + - Clarify that base schemas are for new installations, migrations for upgrades + +### Phase 2: Long-term Consolidation (Optional) + +Choose between Option A or Option B and implement consistently: + +**If choosing Option A (Migration-First):** + +1. Create migration 000_initial_schema.sql with executions table +2. Ensure 001_initial_rbac.sql is complete +3. Remove schema.sql and rbac-schema.sql +4. Update DatabaseService to only run migrations +5. Update tests to use migration-based setup + +**If choosing Option B (Keep Current Hybrid):** + +1. Keep schema.sql and rbac-schema.sql as base schemas +2. Accept that migrations 001 and 004 duplicate base schemas +3. Document that migrations use CREATE IF NOT EXISTS for idempotency +4. 
Ensure all future changes go through migrations only + +## Files to Review + +- `pabawi/backend/src/database/DatabaseService.ts` - Initialization logic +- `pabawi/backend/src/database/MigrationRunner.ts` - Migration execution +- `pabawi/backend/src/database/schema.sql` - Base executions schema +- `pabawi/backend/src/database/rbac-schema.sql` - Base RBAC schema +- `pabawi/backend/src/database/audit-schema.sql` - ⚠️ Orphaned, should delete +- `pabawi/backend/src/database/migrations.sql` - ⚠️ Orphaned, should delete +- `pabawi/backend/src/database/migrations/*.sql` - All migration files +- `pabawi/Dockerfile` - Now copies entire database/ directory + +## Testing Requirements + +After cleanup: + +1. Test fresh database initialization (no existing DB) +2. Test migration from each previous version +3. Verify all tables are created correctly +4. Run existing database tests +5. Test Docker deployment with clean database + +## Questions to Answer + +1. Should we keep the hybrid approach or move to migration-first? +2. Are there any other references to the orphaned files? +3. Should migrations 001 and 004 be kept even though they duplicate base schemas? +4. What's the policy for future schema changes - always use migrations? + +## Related Issues + +- Docker deployment bug (fixed) - Missing schema files in Docker image +- Database initialization on clean setup + +--- + +## Final Completion Summary (March 11, 2026) - Migration-First Approach + +### Actions Taken + +1. **Deleted ALL base schema files:** + - ✅ `backend/src/database/schema.sql` - Converted to migration 000 + - ✅ `backend/src/database/rbac-schema.sql` - Already in migration 001 + - ✅ `backend/src/database/audit-schema.sql` - Already in migration 004 (orphaned) + - ✅ `backend/src/database/migrations.sql` - Orphaned, superseded + +2. 
**Created migration 000:** + - ✅ `migrations/000_initial_schema.sql` - Contains executions and revoked_tokens tables + - This is now the first migration that runs on a fresh database + +3. **Refactored DatabaseService.ts:** + - ✅ Removed all base schema loading code + - ✅ Removed unused `exec()` method + - ✅ Removed unused imports (readFileSync, join) + - ✅ Now only runs migrations via MigrationRunner + - ✅ Added comprehensive documentation explaining migration-first policy + +4. **Updated build configuration:** + - ✅ Modified `backend/package.json` build script + - Now only copies `migrations/` directory (no base schemas) + +5. **Updated all documentation:** + - ✅ `docs/development/BACKEND_CODE_ANALYSIS.md` - Migration-first approach + - ✅ `.github/copilot-instructions.md` - Migration-first approach + - ✅ `CLAUDE.md` - Migration-first approach + +### Final State - Pure Migration-First + +**Schema Management Policy:** + +- ALL schema definitions are in numbered migrations (000, 001, 002, ...) +- Migration 000: Initial schema (executions, revoked_tokens) +- Migration 001: RBAC tables (users, roles, permissions, groups) +- Migration 002: Seeds RBAC data +- Migration 003: Failed login attempts +- Migration 004: Audit logging +- Migration 005: SSH execution tool +- Migration 006: Batch executions +- Future changes: Always create a new numbered migration +- Never modify existing migrations after they've been applied + +**Files:** + +- ✅ `migrations/000_initial_schema.sql` through `006_add_batch_executions.sql` +- ✅ No base schema files +- ✅ No duplicate definitions +- ✅ Single source of truth: migrations directory + +**Testing:** + +- ✅ Build passes successfully +- ✅ TypeScript compilation clean +- ✅ No unused code or imports + +### Benefits of Migration-First Approach + +1. **Single source of truth** - All schema in one place (migrations/) +2. **Clear history** - Every change is tracked and numbered +3. **No duplicates** - Eliminated all duplicate table definitions +4. 
**Standard practice** - Follows industry-standard migration patterns +5. **Easy rollback** - Can track exactly what changed and when +6. **Clean codebase** - Simpler DatabaseService with less code + +### Next Steps + +None required. The migration-first approach is fully implemented and documented. diff --git a/.kiro/todo/default-user-permissions-fix.md b/.kiro/done/default-user-permissions-fix.md similarity index 100% rename from .kiro/todo/default-user-permissions-fix.md rename to .kiro/done/default-user-permissions-fix.md diff --git a/.kiro/done/docker-missing-schema-files.md b/.kiro/done/docker-missing-schema-files.md new file mode 100644 index 00000000..e1c8d528 --- /dev/null +++ b/.kiro/done/docker-missing-schema-files.md @@ -0,0 +1,49 @@ +# Bug: Docker Image Missing Database Schema Files + +## Issue + +On clean Docker setup using 0.8.0 image, the application fails to start with database errors: + +``` +ERROR [SetupService] [isSetupComplete] Failed to check setup status +Error: SQLITE_ERROR: no such table: users +``` + +Users see login page with 500 backend errors. + +## Root Cause + +The Dockerfile was only copying `schema.sql` but not the other required database files: + +- `rbac-schema.sql` (contains users, roles, permissions tables) +- `audit-schema.sql` (contains audit logging tables) +- `migrations/` directory (contains database migrations) + +TypeScript compiler only copies `.ts` files to `dist/`, so SQL files and migrations must be explicitly copied. 
+ +## Fix Applied + +Updated Dockerfile to copy the entire database directory structure: + +```dockerfile +# Copy database directory with all SQL files and migrations (not copied by TypeScript compiler) +# This ensures schema files, migrations, and any future database-related files are included +COPY --from=backend-builder --chown=pabawi:pabawi /app/backend/src/database/ ./dist/database/ +``` + +This approach is future-proof - any new schema files or migrations added to `src/database/` will automatically be included in the Docker image. + +## Testing Required + +1. Rebuild Docker image with the fix: `docker build -t pabawi:0.8.0-fixed .` +2. Test clean deployment (no existing database) +3. Verify setup page loads correctly +4. Verify admin user creation works +5. Verify login functionality +6. Check that all migrations run successfully + +## Related Files + +- `pabawi/Dockerfile` +- `pabawi/backend/src/database/DatabaseService.ts` +- `pabawi/backend/src/database/` (entire directory now copied) diff --git a/.kiro/done/node-linking-redesign.md b/.kiro/done/node-linking-redesign.md new file mode 100644 index 00000000..3018f8d3 --- /dev/null +++ b/.kiro/done/node-linking-redesign.md @@ -0,0 +1,103 @@ +# Node Linking Redesign - IMPLEMENTED + +## Problem + +Current implementation tries to merge nodes into a single object with one ID, causing: + +- ManageTab can't find Proxmox nodes (wrong ID format) +- Puppet reports can't find nodes (looking for Proxmox ID instead of hostname) +- Complex logic trying to prioritize which ID to use + +## Solution Implemented + +### Backend Changes + +1. **Updated `LinkedNode` interface** to include `sourceData`: + + ```typescript + interface LinkedNode extends Node { + sources: string[]; + linked: boolean; + sourceData: Record; // NEW + } + + interface SourceNodeData { + id: string; + uri: string; + config?: Record; + metadata?: Record; + status?: string; + } + ``` + +2. 
**Simplified `NodeLinkingService.linkNodes()`**: + - Uses node `name` as the primary ID (common identifier across sources) + - Stores source-specific data in `sourceData` object + - Each source keeps its original ID, URI, metadata, etc. + +3. **Simplified `IntegrationManager.deduplicateNodes()`**: + - Now just calls `linkNodes()` directly + - No more complex priority-based merging + - Source data is already correctly organized + +### How It Works + +When nodes from different sources are linked: + +```typescript +// Input nodes: +// - Bolt: id="debian13.test.example42.com", name="debian13.test.example42.com" +// - Proxmox: id="proxmox:minis:100", name="debian13.test.example42.com" +// - PuppetDB: id="debian13.test.example42.com", name="debian13.test.example42.com" + +// Output linked node: +{ + id: "debian13.test.example42.com", // Primary ID (name) + name: "debian13.test.example42.com", + sources: ["bolt", "proxmox", "puppetdb"], + linked: true, + sourceData: { + bolt: { + id: "debian13.test.example42.com", + uri: "ssh://debian13.test.example42.com" + }, + proxmox: { + id: "proxmox:minis:100", + uri: "proxmox://minis/100", + metadata: { vmid: 100, node: "minis", type: "qemu", status: "running" } + }, + puppetdb: { + id: "debian13.test.example42.com", + uri: "ssh://debian13.test.example42.com" + } + } +} +``` + +### Frontend Usage (Next Step) + +Components should use source-specific data: + +```typescript +// ManageTab - use Proxmox ID +if (node.sourceData?.proxmox) { + await executeNodeAction(node.sourceData.proxmox.id, action); +} + +// Puppet Reports - use PuppetDB ID +if (node.sourceData?.puppetdb) { + const reports = await fetchReports(node.sourceData.puppetdb.id); +} +``` + +## Test Results + +✅ All tests pass (12/12) +✅ New test added: "should store source-specific data for each source" + +## Next Steps + +1. Update frontend components to use `sourceData` +2. Update API endpoints to search by any source ID or name +3. 
Update ManageTab to use `node.sourceData.proxmox.id` +4. Update Puppet components to use `node.sourceData.puppetdb.id` diff --git a/.kiro/done/provisioning-endpoint-fix.md b/.kiro/done/provisioning-endpoint-fix.md new file mode 100644 index 00000000..ef6f54ee --- /dev/null +++ b/.kiro/done/provisioning-endpoint-fix.md @@ -0,0 +1,38 @@ +# Provisioning Endpoint Fix + +## Issue + +The frontend was calling `/api/integrations/provisioning` but this endpoint didn't exist in the backend, causing a JSON parse error: "Unexpected token '<', "; +} + +// Proxmox VM creation +POST /api/integrations/proxmox/provision/vm +Body: VMCreateParams +Response: { taskId: string; vmid: number; } + +// Proxmox LXC creation +POST /api/integrations/proxmox/provision/lxc +Body: LXCCreateParams +Response: { taskId: string; vmid: number; } + +// Node lifecycle actions +POST /api/integrations/proxmox/nodes/:nodeId/action +Body: { action: string; parameters?: Record } +Response: { taskId: string; status: string; } + +// Node destruction +DELETE /api/integrations/proxmox/nodes/:nodeId +Response: { taskId: string; status: string; } + +// Integration configuration +PUT /api/integrations/proxmox/config +Body: ProxmoxConfig +Response: { success: boolean; } + +// Connection test +POST /api/integrations/proxmox/test +Body: ProxmoxConfig +Response: { success: boolean; message: string; } +``` + +## Components and Interfaces + +### Core Types + +```typescript +// Integration capability metadata +interface ProvisioningCapability { + name: string; + description: string; + operation: 'create' | 'destroy'; + parameters: CapabilityParameter[]; +} + +interface CapabilityParameter { + name: string; + type: 'string' | 'number' | 'boolean' | 'object' | 'array'; + required: boolean; + description?: string; + default?: unknown; + validation?: { + min?: number; + max?: number; + pattern?: string; + enum?: string[]; + }; +} + +// Integration metadata +interface ProvisioningIntegration { + name: string; + displayName: 
string; + type: 'virtualization' | 'cloud' | 'container'; + status: 'connected' | 'degraded' | 'not_configured'; + capabilities: ProvisioningCapability[]; +} + +// Proxmox-specific types +interface ProxmoxVMParams { + vmid: number; + name: string; + node: string; + cores?: number; + memory?: number; + sockets?: number; + cpu?: string; + scsi0?: string; + ide2?: string; + net0?: string; + ostype?: string; +} + +interface ProxmoxLXCParams { + vmid: number; + hostname: string; + node: string; + ostemplate: string; + cores?: number; + memory?: number; + rootfs?: string; + net0?: string; + password?: string; +} + +// Lifecycle action types +interface LifecycleAction { + name: string; + displayName: string; + description: string; + requiresConfirmation: boolean; + destructive: boolean; + availableWhen: string[]; // Node states when action is available +} + +// Operation result +interface ProvisioningResult { + success: boolean; + taskId?: string; + vmid?: number; + nodeId?: string; + message: string; + error?: string; +} +``` + +### ProvisionPage Component + +**Purpose**: Main page for creating new VMs and containers + +**State Management**: + +```typescript +let integrations = $state([]); +let selectedIntegration = $state('proxmox'); +let loading = $state(true); +let error = $state(null); +``` + +**Key Functions**: + +- `fetchIntegrations()`: Load available provisioning integrations +- `selectIntegration(name: string)`: Switch between integrations +- `handleProvisionSuccess(result: ProvisioningResult)`: Navigate to new node + +**Routing**: `/provision` + +**RBAC**: Hidden from navigation if user lacks provisioning permissions + +### ProxmoxProvisionForm Component + +**Purpose**: Tabbed interface for VM and LXC creation + +**State Management**: + +```typescript +let activeTab = $state<'vm' | 'lxc'>('vm'); +let formData = $state({}); +let validationErrors = $state>({}); +let submitting = $state(false); +``` + +**Key Functions**: + +- `validateForm()`: Client-side 
validation before submission +- `submitForm()`: POST to provisioning endpoint +- `resetForm()`: Clear form after successful submission + +**Validation Rules**: + +- VMID: Required, positive integer, unique +- Name/Hostname: Required, alphanumeric with hyphens +- Node: Required, must be valid Proxmox node +- Memory: Optional, minimum 512MB +- Cores: Optional, minimum 1 + +### ManageTab Component + +**Purpose**: Lifecycle actions on node detail page + +**State Management**: + +```typescript +let availableActions = $state([]); +let nodeStatus = $state('unknown'); +let actionInProgress = $state(null); +let confirmDialog = $state<{ action: string; open: boolean }>({ action: '', open: false }); +``` + +**Key Functions**: + +- `fetchAvailableActions()`: Query backend for permitted actions +- `executeAction(action: string)`: Perform lifecycle operation +- `confirmDestructiveAction(action: string)`: Show confirmation dialog +- `pollActionStatus(taskId: string)`: Monitor operation completion + +**Action Availability Logic**: + +```typescript +const actionAvailability = { + start: ['stopped'], + stop: ['running'], + shutdown: ['running'], + reboot: ['running'], + suspend: ['running'], + resume: ['suspended'], + destroy: ['stopped', 'running', 'suspended'] +}; +``` + +### ProxmoxSetupGuide Component + +**Purpose**: Configuration form for Proxmox integration + +**State Management**: + +```typescript +let config = $state({ + host: '', + port: 8006, + username: '', + password: '', + realm: 'pam', + ssl: { rejectUnauthorized: true } +}); +let testResult = $state<{ success: boolean; message: string } | null>(null); +let saving = $state(false); +``` + +**Key Functions**: + +- `testConnection()`: Verify Proxmox connectivity +- `saveConfiguration()`: Persist config to backend +- `validateConfig()`: Client-side validation + +**Validation Rules**: + +- Host: Required, valid hostname or IP +- Port: Required, 1-65535 +- Authentication: Either (username + password + realm) OR token +- SSL: 
Warning if rejectUnauthorized is false + +### Form Validation Utilities + +**Purpose**: Reusable validation functions + +```typescript +// lib/validation.ts +export function validateVMID(vmid: number): string | null { + if (!vmid || vmid < 100 || vmid > 999999999) { + return 'VMID must be between 100 and 999999999'; + } + return null; +} + +export function validateHostname(hostname: string): string | null { + const pattern = /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/; + if (!pattern.test(hostname)) { + return 'Hostname must contain only lowercase letters, numbers, and hyphens'; + } + return null; +} + +export function validateMemory(memory: number): string | null { + if (memory < 512) { + return 'Memory must be at least 512 MB'; + } + return null; +} + +export function validateRequired(value: unknown, fieldName: string): string | null { + if (value === null || value === undefined || value === '') { + return `${fieldName} is required`; + } + return null; +} +``` + +## Data Models + +### Frontend State Models + +```typescript +// Provisioning state +interface ProvisioningState { + integrations: ProvisioningIntegration[]; + selectedIntegration: string | null; + loading: boolean; + error: string | null; +} + +// Form state +interface FormState { + data: T; + errors: Record; + touched: Record; + submitting: boolean; + submitError: string | null; +} + +// Action state +interface ActionState { + availableActions: LifecycleAction[]; + executingAction: string | null; + lastResult: ProvisioningResult | null; +} +``` + +### API Response Models + +```typescript +// Integration list response +interface IntegrationListResponse { + integrations: ProvisioningIntegration[]; + _debug?: DebugInfo; +} + +// Provisioning response +interface ProvisioningResponse { + success: boolean; + taskId: string; + vmid?: number; + nodeId?: string; + message: string; + _debug?: DebugInfo; +} + +// Action response +interface ActionResponse { + success: boolean; + taskId: string; + status: string; + message: 
string; + _debug?: DebugInfo; +} + +// Configuration response +interface ConfigResponse { + success: boolean; + message: string; + _debug?: DebugInfo; +} +``` + +### Navigation Updates + +```typescript +// Add to Router.svelte routes +const routes = { + '/': HomePage, + '/provision': { component: ProvisionPage, requiresAuth: true }, + '/nodes/:id': NodeDetailPage, + '/setup/:integration': IntegrationSetupPage, + // ... existing routes +}; +``` + +### Navigation Component Updates + +```typescript +// Add to Navigation.svelte +{#if authManager.isAuthenticated && hasProvisioningPermission} + + + Provision + +{/if} +``` + +## Data Models (continued) + +### Permission Model + +```typescript +// User permissions (from auth context) +interface UserPermissions { + canProvision: boolean; + canManageVMs: boolean; + canDestroyVMs: boolean; + allowedIntegrations: string[]; + allowedActions: string[]; +} + +// Permission check utility +function hasPermission(action: string, integration: string): boolean { + const permissions = authManager.user?.permissions; + if (!permissions) return false; + + return permissions.allowedActions.includes(action) && + permissions.allowedIntegrations.includes(integration); +} +``` + +## Correctness Properties + +*A property is a characteristic or behavior that should hold true across all valid executions of a system—essentially, a formal statement about what the system should do. 
Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees.* + +### Property Reflection + +After analyzing all acceptance criteria, I identified the following redundancies: + +- Properties 6.4-6.7 cover action execution, success handling, error handling, and loading states for all lifecycle actions +- Properties 11.2-11.5 all test form validation for different field types and can be combined into comprehensive validation properties +- Properties 12.1-12.7 cover error and success notification patterns that apply across all operations +- Multiple "example" tests for VM and LXC operations follow the same patterns and can be consolidated + +The properties below represent the unique, non-redundant correctness guarantees for this feature. + +### Property 1: Integration Discovery and Display + +*For any* list of provisioning integrations returned by the backend, the Provision page should display all integrations that have at least one provisioning capability, and hide integrations with zero capabilities. + +**Validates: Requirements 1.4, 2.2, 2.3** + +### Property 2: Permission-Based UI Visibility + +*For any* UI element (menu item, button, form, tab) that requires a specific permission, if the current user lacks that permission, the element should not be rendered in the DOM. + +**Validates: Requirements 1.3, 5.4, 9.2, 9.3** + +### Property 3: Action Button Availability + +*For any* lifecycle action and node state combination, action buttons should only be displayed when the node's current state matches one of the action's `availableWhen` states. + +**Validates: Requirements 6.1, 6.2, 6.3** + +### Property 4: Action Execution Triggers API Call + +*For any* lifecycle action button that is clicked, the frontend should send an API request to the integration endpoint with the correct action name and node identifier. 
+ +**Validates: Requirements 6.4** + +### Property 5: Successful Action Handling + +*For any* action that completes successfully, the frontend should display a success notification containing relevant details and refresh the node status data. + +**Validates: Requirements 6.5, 12.5** + +### Property 6: Failed Action Error Display + +*For any* action that fails, the frontend should display an error notification containing the error message from the backend response. + +**Validates: Requirements 6.6, 12.1, 12.2** + +### Property 7: Loading State During Actions + +*For any* action that is in progress, all action buttons should be disabled and a loading indicator should be visible until the action completes or fails. + +**Validates: Requirements 3.6, 4.6, 6.7** + +### Property 8: Form Validation Completeness + +*For any* form field with validation rules (required, format, range, length), submitting the form with invalid data should display an error message for that field and prevent submission. + +**Validates: Requirements 11.1, 11.2, 11.3, 11.4, 11.5** + +### Property 9: Valid Form Enables Submission + +*For any* form where all fields pass their validation rules, the submit button should be enabled and submission should be allowed. + +**Validates: Requirements 11.6** + +### Property 10: Configuration Validation + +*For any* Proxmox configuration form submission, all required fields (host, port, authentication) must be validated before the configuration is sent to the backend. + +**Validates: Requirements 10.3** + +### Property 11: Error Notification Persistence + +*For any* error notification displayed to the user, it should remain visible until explicitly dismissed by the user (no auto-dismiss). + +**Validates: Requirements 12.7** + +### Property 12: Success Notification Auto-Dismiss + +*For any* success notification displayed to the user, it should automatically dismiss after exactly 5 seconds. 
+ +**Validates: Requirements 12.6** + +### Property 13: Error Details Expandability + +*For any* error response that includes additional details beyond the main message, those details should be available in an expandable section of the error notification. + +**Validates: Requirements 12.3** + +### Property 14: Error Logging + +*For any* error that occurs (API failure, validation error, unexpected exception), the error should be logged to the browser console with sufficient context for debugging. + +**Validates: Requirements 12.4** + +### Property 15: Dynamic Form Generation + +*For any* integration capability with parameter metadata, the frontend should generate form fields matching the parameter types, validation rules, and default values specified in the metadata. + +**Validates: Requirements 13.1, 13.4** + +### Property 16: Integration Extensibility + +*For any* new provisioning integration added to the backend with valid capability metadata, the frontend should automatically discover it on the next page load and render appropriate UI without code changes. + +**Validates: Requirements 13.3** + +### Property 17: Dynamic Action Rendering + +*For any* set of lifecycle capabilities returned by the backend for a node, the Manage tab should render action buttons based on the capability metadata rather than hardcoded action names. + +**Validates: Requirements 13.5** + +## Error Handling + +### Error Categories + +The frontend will handle these error categories: + +1. **Network Errors**: Connection failures, timeouts +2. **Authentication Errors**: 401 responses, expired tokens +3. **Authorization Errors**: 403 responses, insufficient permissions +4. **Validation Errors**: 400 responses, invalid input +5. **Not Found Errors**: 404 responses, missing resources +6. 
**Server Errors**: 500+ responses, backend failures + +### Error Handling Strategy + +```typescript +// Centralized error handler +function handleApiError(error: unknown, context: string): void { + // Log to console for debugging + logger.error(context, 'API error', error); + + // Extract error message + const message = error instanceof Error ? error.message : 'Unknown error'; + + // Categorize and display appropriate notification + if (message.includes('401') || message.includes('unauthorized')) { + showError('Authentication required', 'Please log in and try again'); + router.navigate('/login'); + } else if (message.includes('403') || message.includes('permission')) { + showError('Permission denied', 'You do not have permission for this action'); + } else if (message.includes('404')) { + showError('Not found', 'The requested resource does not exist'); + } else if (message.includes('timeout')) { + showError('Request timed out', 'The operation took too long. Please try again'); + } else { + showError('Operation failed', message); + } +} +``` + +### Form Validation Errors + +```typescript +// Validation error display +interface ValidationResult { + valid: boolean; + errors: Record; +} + +function validateForm(data: Record, rules: ValidationRules): ValidationResult { + const errors: Record = {}; + + for (const [field, rule] of Object.entries(rules)) { + const value = data[field]; + + // Required validation + if (rule.required && !value) { + errors[field] = `${rule.label} is required`; + continue; + } + + // Type-specific validation + if (value) { + if (rule.type === 'number') { + const num = Number(value); + if (isNaN(num)) { + errors[field] = `${rule.label} must be a number`; + } else if (rule.min !== undefined && num < rule.min) { + errors[field] = `${rule.label} must be at least ${rule.min}`; + } else if (rule.max !== undefined && num > rule.max) { + errors[field] = `${rule.label} must be at most ${rule.max}`; + } + } else if (rule.type === 'string') { + const str 
= String(value); + if (rule.minLength && str.length < rule.minLength) { + errors[field] = `${rule.label} must be at least ${rule.minLength} characters`; + } else if (rule.maxLength && str.length > rule.maxLength) { + errors[field] = `${rule.label} must be at most ${rule.maxLength} characters`; + } else if (rule.pattern && !rule.pattern.test(str)) { + errors[field] = rule.patternMessage || `${rule.label} format is invalid`; + } + } + } + } + + return { + valid: Object.keys(errors).length === 0, + errors + }; +} +``` + +### Retry Logic + +The existing `api.ts` module provides retry logic for transient failures. Provisioning operations will use custom retry settings: + +```typescript +// Provisioning operations - no retries (user-initiated) +await post('/api/integrations/proxmox/provision/vm', params, { + maxRetries: 0, + showRetryNotifications: false +}); + +// Status queries - retry with backoff +await get('/api/integrations/provisioning', { + maxRetries: 2, + retryDelay: 1000 +}); +``` + +### User Feedback + +All operations provide immediate feedback: + +1. **Loading States**: Spinners, disabled buttons, progress indicators +2. **Success Messages**: Toast notifications with action details +3. **Error Messages**: Toast notifications with actionable guidance +4. **Validation Feedback**: Inline error messages below form fields +5. **Confirmation Dialogs**: For destructive actions (destroy VM/LXC) + +## Testing Strategy + +### Dual Testing Approach + +This feature will use both unit tests and property-based tests for comprehensive coverage: + +- **Unit Tests**: Verify specific examples, edge cases, and integration points +- **Property Tests**: Verify universal properties across all inputs + +### Unit Testing Focus + +Unit tests will cover: + +1. **Component Rendering**: Specific UI elements render correctly +2. **User Interactions**: Click handlers, form submissions, navigation +3. **Edge Cases**: Empty states, loading states, error states +4. 
**Integration Points**: API client calls, router navigation, auth checks +5. **Specific Examples**: VM creation with valid data, LXC destruction flow + +Example unit tests: + +```typescript +// Component rendering +test('ProvisionPage displays Proxmox integration when available', async () => { + const mockIntegrations = [{ name: 'proxmox', capabilities: [...] }]; + // Mock API response and verify rendering +}); + +// User interaction +test('clicking Start button calls executeAction with correct parameters', async () => { + const mockExecuteAction = vi.fn(); + // Render component, click button, verify API call +}); + +// Edge case +test('ManageTab shows "no actions available" when user has no permissions', () => { + // Render with empty permissions, verify message +}); +``` + +### Property-Based Testing Configuration + +**Library**: fast-check (JavaScript/TypeScript property-based testing) + +**Configuration**: + +- Minimum 100 iterations per property test +- Each test tagged with feature name and property reference +- Custom generators for domain types (integrations, capabilities, permissions) + +**Tag Format**: `Feature: proxmox-frontend-ui, Property {number}: {property_text}` + +Example property tests: + +```typescript +import fc from 'fast-check'; + +// Property 1: Integration Discovery and Display +test('Feature: proxmox-frontend-ui, Property 1: displays integrations with capabilities', () => { + fc.assert( + fc.property( + fc.array(integrationArbitrary()), + (integrations) => { + const displayed = filterDisplayableIntegrations(integrations); + const expected = integrations.filter(i => i.capabilities.length > 0); + expect(displayed).toEqual(expected); + } + ), + { numRuns: 100 } + ); +}); + +// Property 8: Form Validation Completeness +test('Feature: proxmox-frontend-ui, Property 8: invalid fields prevent submission', () => { + fc.assert( + fc.property( + fc.record({ + vmid: fc.integer({ min: -1000, max: 1000000000 }), + name: fc.string(), + memory: 
fc.integer({ min: 0, max: 100000 }) + }), + (formData) => { + const result = validateVMForm(formData); + const hasInvalidVMID = formData.vmid < 100 || formData.vmid > 999999999; + const hasInvalidMemory = formData.memory < 512; + const hasInvalidName = !formData.name || formData.name.length === 0; + + if (hasInvalidVMID || hasInvalidMemory || hasInvalidName) { + expect(result.valid).toBe(false); + expect(Object.keys(result.errors).length).toBeGreaterThan(0); + } + } + ), + { numRuns: 100 } + ); +}); + +// Property 15: Dynamic Form Generation +test('Feature: proxmox-frontend-ui, Property 15: generates fields from metadata', () => { + fc.assert( + fc.property( + fc.array(capabilityParameterArbitrary()), + (parameters) => { + const fields = generateFormFields(parameters); + expect(fields.length).toBe(parameters.length); + + parameters.forEach((param, index) => { + expect(fields[index].name).toBe(param.name); + expect(fields[index].type).toBe(param.type); + expect(fields[index].required).toBe(param.required); + }); + } + ), + { numRuns: 100 } + ); +}); +``` + +### Custom Generators + +```typescript +// Generator for provisioning integrations +function integrationArbitrary(): fc.Arbitrary { + return fc.record({ + name: fc.constantFrom('proxmox', 'ec2', 'azure', 'terraform'), + displayName: fc.string(), + type: fc.constantFrom('virtualization', 'cloud', 'container'), + status: fc.constantFrom('connected', 'degraded', 'not_configured'), + capabilities: fc.array(capabilityArbitrary(), { minLength: 0, maxLength: 10 }) + }); +} + +// Generator for capability parameters +function capabilityParameterArbitrary(): fc.Arbitrary { + return fc.record({ + name: fc.string({ minLength: 1 }), + type: fc.constantFrom('string', 'number', 'boolean', 'object', 'array'), + required: fc.boolean(), + description: fc.option(fc.string()), + default: fc.anything() + }); +} + +// Generator for user permissions +function permissionsArbitrary(): fc.Arbitrary { + return fc.record({ + canProvision: 
fc.boolean(), + canManageVMs: fc.boolean(), + canDestroyVMs: fc.boolean(), + allowedIntegrations: fc.array(fc.string()), + allowedActions: fc.array(fc.constantFrom('start', 'stop', 'reboot', 'destroy')) + }); +} +``` + +### Test Organization + +``` +frontend/src/ +├── pages/ +│ ├── ProvisionPage.test.ts (unit + property tests) +│ └── NodeDetailPage.test.ts (unit tests for ManageTab) +├── components/ +│ ├── ProxmoxProvisionForm.test.ts (unit + property tests) +│ ├── ManageTab.test.ts (unit + property tests) +│ └── ProxmoxSetupGuide.test.ts (unit tests) +├── lib/ +│ ├── validation.test.ts (property tests) +│ └── provisioning.test.ts (property tests) +└── __tests__/ + └── generators.ts (custom fast-check generators) +``` + +### Integration Testing + +Integration tests will verify: + +1. Full provisioning flow from form submission to success notification +2. Error handling across component boundaries +3. Navigation between pages +4. Permission checks across multiple components + +### Test Execution + +```bash +# Run all tests +npm test -- --silent + +# Run specific test file +npm test -- ProvisionPage.test.ts --silent + +# Run property tests only +npm test -- --grep "Property [0-9]+" --silent + +# Run with coverage +npm test -- --coverage --silent +``` + +### Coverage Goals + +- **Line Coverage**: > 80% +- **Branch Coverage**: > 75% +- **Function Coverage**: > 85% +- **Property Test Coverage**: All 17 properties implemented diff --git a/.kiro/specs/090/proxmox-frontend-ui/requirements.md b/.kiro/specs/090/proxmox-frontend-ui/requirements.md new file mode 100644 index 00000000..35cb03b4 --- /dev/null +++ b/.kiro/specs/090/proxmox-frontend-ui/requirements.md @@ -0,0 +1,200 @@ +# Requirements Document + +## Introduction + +This document specifies the requirements for adding Proxmox provisioning capabilities to the Pabawi frontend. 
The feature enables users with appropriate permissions to provision and manage virtual machines through available integrations (initially Proxmox, with future support for EC2, Azure, and Terraform). The system will dynamically discover provisioning capabilities from backend integrations and enforce role-based access control for all provisioning operations. + +## Glossary + +- **Pabawi_Frontend**: The web-based user interface for the Pabawi infrastructure management system +- **Integration_Manager**: Backend service that manages and exposes capabilities from various infrastructure integrations +- **Provisioning_Integration**: An integration that provides VM/container creation and lifecycle management capabilities +- **Proxmox_Integration**: The backend integration for Proxmox virtualization platform +- **VM**: Virtual Machine - a virtualized compute instance +- **LXC**: Linux Container - a lightweight virtualized environment +- **RBAC_System**: Role-Based Access Control system that manages user permissions +- **Provisioning_Capability**: A specific action that an integration can perform (create_vm, destroy_vm, start, stop, etc.) +- **Node_Detail_Page**: The page displaying information about a specific VM or LXC instance +- **Provision_Page**: The new page where users can create VMs using available integrations +- **Setup_Page**: The configuration page for integrations +- **Top_Menu**: The main navigation menu in the Pabawi frontend + +## Requirements + +### Requirement 1: Provision Page Navigation + +**User Story:** As a user with provisioning permissions, I want to access a dedicated provisioning page from the main menu, so that I can easily create new VMs and containers. + +#### Acceptance Criteria + +1. THE Pabawi_Frontend SHALL display a "Provision" entry in the Top_Menu +2. WHEN a user clicks the "Provision" menu entry, THE Pabawi_Frontend SHALL navigate to the Provision_Page +3. 
WHERE a user lacks provisioning permissions, THE Pabawi_Frontend SHALL hide the "Provision" menu entry +4. THE Provision_Page SHALL display all available Provisioning_Integrations + +### Requirement 2: Dynamic Integration Discovery + +**User Story:** As a system administrator, I want the frontend to automatically discover available provisioning integrations, so that new integrations work without frontend code changes. + +#### Acceptance Criteria + +1. WHEN the Provision_Page loads, THE Pabawi_Frontend SHALL query the Integration_Manager for available Provisioning_Integrations +2. FOR EACH Provisioning_Integration, THE Pabawi_Frontend SHALL retrieve the list of supported Provisioning_Capabilities +3. THE Pabawi_Frontend SHALL display only integrations that provide at least one provisioning capability +4. WHEN the Integration_Manager returns an error, THE Pabawi_Frontend SHALL display an error message and log the failure + +### Requirement 3: VM Creation Interface + +**User Story:** As a user with VM creation permissions, I want to create VMs through the Proxmox integration, so that I can provision infrastructure on demand. + +#### Acceptance Criteria + +1. WHERE Proxmox_Integration is available, THE Provision_Page SHALL display a VM creation form +2. THE VM creation form SHALL include fields for all required Proxmox VM parameters +3. WHEN a user submits the VM creation form with valid data, THE Pabawi_Frontend SHALL send a create_vm request to the Proxmox_Integration +4. WHEN the create_vm request succeeds, THE Pabawi_Frontend SHALL display a success message with the new VM identifier +5. IF the create_vm request fails, THEN THE Pabawi_Frontend SHALL display the error message returned by the Proxmox_Integration +6. 
WHILE a create_vm request is in progress, THE Pabawi_Frontend SHALL disable the submit button and display a loading indicator + +### Requirement 4: LXC Container Creation Interface + +**User Story:** As a user with container creation permissions, I want to create LXC containers through the Proxmox integration, so that I can provision lightweight compute resources. + +#### Acceptance Criteria + +1. WHERE Proxmox_Integration is available, THE Provision_Page SHALL display an LXC creation form +2. THE LXC creation form SHALL include fields for all required Proxmox LXC parameters +3. WHEN a user submits the LXC creation form with valid data, THE Pabawi_Frontend SHALL send a create_lxc request to the Proxmox_Integration +4. WHEN the create_lxc request succeeds, THE Pabawi_Frontend SHALL display a success message with the new LXC identifier +5. IF the create_lxc request fails, THEN THE Pabawi_Frontend SHALL display the error message returned by the Proxmox_Integration +6. WHILE a create_lxc request is in progress, THE Pabawi_Frontend SHALL disable the submit button and display a loading indicator + +### Requirement 5: Node Management Tab + +**User Story:** As a user managing VMs, I want to access lifecycle actions from the node detail page, so that I can control my VMs without navigating away. + +#### Acceptance Criteria + +1. THE Node_Detail_Page SHALL display a "Manage" tab +2. WHEN a user selects the "Manage" tab, THE Pabawi_Frontend SHALL display available lifecycle actions for the node +3. THE Pabawi_Frontend SHALL query the Integration_Manager for actions available for the specific node type +4. THE Pabawi_Frontend SHALL display only actions that the current user has permission to perform +5. 
WHERE no actions are available or permitted, THE Pabawi_Frontend SHALL display a message indicating no actions are available + +### Requirement 6: VM Lifecycle Actions + +**User Story:** As a user with VM management permissions, I want to start, stop, and control VMs from the manage tab, so that I can operate my infrastructure. + +#### Acceptance Criteria + +1. WHERE a VM is stopped, THE Manage_Tab SHALL display a "Start" action button +2. WHERE a VM is running, THE Manage_Tab SHALL display "Stop", "Shutdown", "Reboot", and "Suspend" action buttons +3. WHERE a VM is suspended, THE Manage_Tab SHALL display a "Resume" action button +4. WHEN a user clicks an action button, THE Pabawi_Frontend SHALL send the corresponding request to the Proxmox_Integration +5. WHEN an action request succeeds, THE Pabawi_Frontend SHALL display a success message and refresh the node status +6. IF an action request fails, THEN THE Pabawi_Frontend SHALL display the error message returned by the Proxmox_Integration +7. WHILE an action request is in progress, THE Pabawi_Frontend SHALL disable all action buttons and display a loading indicator + +### Requirement 7: VM Destruction + +**User Story:** As a user with VM destruction permissions, I want to delete VMs that are no longer needed, so that I can free up resources. + +#### Acceptance Criteria + +1. WHERE a user has destroy permissions, THE Manage_Tab SHALL display a "Destroy" action button +2. WHEN a user clicks the "Destroy" button, THE Pabawi_Frontend SHALL display a confirmation dialog with the VM identifier +3. WHEN a user confirms destruction, THE Pabawi_Frontend SHALL send a destroy_vm request to the Proxmox_Integration +4. WHEN the destroy_vm request succeeds, THE Pabawi_Frontend SHALL display a success message and navigate away from the Node_Detail_Page +5. IF the destroy_vm request fails, THEN THE Pabawi_Frontend SHALL display the error message and keep the user on the Node_Detail_Page +6. 
WHEN a user cancels the confirmation dialog, THE Pabawi_Frontend SHALL take no action + +### Requirement 8: LXC Container Destruction + +**User Story:** As a user with container destruction permissions, I want to delete LXC containers that are no longer needed, so that I can free up resources. + +#### Acceptance Criteria + +1. WHERE a user has destroy permissions for LXC, THE Manage_Tab SHALL display a "Destroy" action button +2. WHEN a user clicks the "Destroy" button for an LXC, THE Pabawi_Frontend SHALL display a confirmation dialog with the LXC identifier +3. WHEN a user confirms destruction, THE Pabawi_Frontend SHALL send a destroy_lxc request to the Proxmox_Integration +4. WHEN the destroy_lxc request succeeds, THE Pabawi_Frontend SHALL display a success message and navigate away from the Node_Detail_Page +5. IF the destroy_lxc request fails, THEN THE Pabawi_Frontend SHALL display the error message and keep the user on the Node_Detail_Page +6. WHEN a user cancels the confirmation dialog, THE Pabawi_Frontend SHALL take no action + +### Requirement 9: Role-Based Access Control + +**User Story:** As a system administrator, I want provisioning actions to respect user roles, so that users can only perform authorized operations. + +#### Acceptance Criteria + +1. WHEN the Pabawi_Frontend requests available actions, THE RBAC_System SHALL return only actions the user is permitted to perform +2. THE Pabawi_Frontend SHALL verify user permissions before displaying any provisioning UI elements +3. WHERE a user lacks permission for an action, THE Pabawi_Frontend SHALL hide the corresponding UI control +4. IF a user attempts an unauthorized action through API manipulation, THEN THE Integration_Manager SHALL reject the request with an authorization error +5. 
THE Pabawi_Frontend SHALL display authorization errors with a clear message indicating insufficient permissions + +### Requirement 10: Proxmox Integration Setup UI + +**User Story:** As a system administrator, I want to configure the Proxmox integration through a user interface, so that I can set up the integration without editing configuration files. + +#### Acceptance Criteria + +1. THE Setup_Page SHALL display a configuration form for Proxmox_Integration +2. THE Proxmox configuration form SHALL include fields for host, port, authentication credentials, and connection options +3. WHEN a user submits the Proxmox configuration form, THE Pabawi_Frontend SHALL validate all required fields are populated +4. WHEN validation passes, THE Pabawi_Frontend SHALL send the configuration to the Integration_Manager +5. WHEN the configuration is saved successfully, THE Pabawi_Frontend SHALL display a success message +6. IF the configuration save fails, THEN THE Pabawi_Frontend SHALL display the error message returned by the Integration_Manager +7. THE Proxmox configuration form SHALL include a "Test Connection" button that verifies connectivity before saving + +### Requirement 11: Input Validation + +**User Story:** As a user, I want the system to validate my inputs before submission, so that I receive immediate feedback on errors. + +#### Acceptance Criteria + +1. THE Pabawi_Frontend SHALL validate all form inputs before enabling the submit button +2. WHEN a required field is empty, THE Pabawi_Frontend SHALL display a validation error message below the field +3. WHEN a field contains invalid data format, THE Pabawi_Frontend SHALL display a format error message below the field +4. THE Pabawi_Frontend SHALL validate numeric fields are within acceptable ranges +5. THE Pabawi_Frontend SHALL validate string fields meet length requirements +6. 
WHEN all validations pass, THE Pabawi_Frontend SHALL enable the submit button + +### Requirement 12: Error Handling and User Feedback + +**User Story:** As a user, I want clear feedback when operations fail, so that I can understand and resolve issues. + +#### Acceptance Criteria + +1. WHEN any API request fails, THE Pabawi_Frontend SHALL display an error notification +2. THE error notification SHALL include the error message returned by the backend +3. WHERE the backend provides error details, THE Pabawi_Frontend SHALL display them in an expandable section +4. THE Pabawi_Frontend SHALL log all errors to the browser console for debugging +5. WHEN an operation succeeds, THE Pabawi_Frontend SHALL display a success notification with relevant details +6. THE Pabawi_Frontend SHALL automatically dismiss success notifications after 5 seconds +7. THE Pabawi_Frontend SHALL keep error notifications visible until the user dismisses them + +### Requirement 13: Extensibility for Future Integrations + +**User Story:** As a developer, I want the provisioning UI to be integration-agnostic, so that adding new provisioning integrations requires minimal frontend changes. + +#### Acceptance Criteria + +1. THE Provision_Page SHALL render provisioning forms based on integration capability metadata +2. THE Pabawi_Frontend SHALL not contain hardcoded logic specific to Proxmox_Integration +3. WHEN a new Provisioning_Integration is added to the backend, THE Pabawi_Frontend SHALL automatically discover and display it +4. THE Pabawi_Frontend SHALL support dynamic form generation based on integration-provided parameter schemas +5. THE Manage_Tab SHALL render action buttons based on capability metadata rather than hardcoded integration names + +### Requirement 14: Documentation Updates + +**User Story:** As a user or administrator, I want up-to-date documentation, so that I can understand how to use the new provisioning features. + +#### Acceptance Criteria + +1. 
THE documentation SHALL include a guide for using the Provision_Page +2. THE documentation SHALL include instructions for configuring the Proxmox_Integration +3. THE documentation SHALL explain the required permissions for each provisioning action +4. THE documentation SHALL include screenshots of the provisioning UI +5. THE documentation SHALL describe how to use the Manage_Tab for VM lifecycle operations +6. THE documentation SHALL include troubleshooting steps for common provisioning errors diff --git a/.kiro/specs/090/proxmox-frontend-ui/tasks.md b/.kiro/specs/090/proxmox-frontend-ui/tasks.md new file mode 100644 index 00000000..11459e38 --- /dev/null +++ b/.kiro/specs/090/proxmox-frontend-ui/tasks.md @@ -0,0 +1,322 @@ +# Implementation Plan: Proxmox Frontend UI + +## Overview + +This implementation plan adds Proxmox provisioning capabilities to the Pabawi frontend. The feature includes a new Provision page for creating VMs and LXC containers, a Manage tab on node detail pages for lifecycle operations, and integration setup UI for Proxmox configuration. The implementation follows a dynamic, integration-agnostic architecture using Svelte 5 with TypeScript. + +## Tasks + +- [x] 1. 
Create core type definitions and API client methods + - [x] 1.1 Define TypeScript interfaces for provisioning types + - Create `frontend/src/lib/types/provisioning.ts` with interfaces for ProvisioningIntegration, ProvisioningCapability, CapabilityParameter, ProxmoxVMParams, ProxmoxLXCParams, LifecycleAction, ProvisioningResult, and API response types + - _Requirements: 2.1, 2.2, 13.1_ + + - [x] 1.2 Add provisioning API methods to api.ts + - Add methods: `getProvisioningIntegrations()`, `createProxmoxVM()`, `createProxmoxLXC()`, `executeNodeAction()`, `destroyNode()`, `saveProxmoxConfig()`, `testProxmoxConnection()` + - Configure retry logic: no retries for provisioning operations, 2 retries for status queries + - _Requirements: 2.1, 3.3, 4.3, 6.4, 7.3, 8.3, 10.4_ + + - [x] 1.3 Write property test for API client methods + - **Property 4: Action Execution Triggers API Call** + - **Validates: Requirements 6.4** + +- [x] 2. Create form validation utilities + - [x] 2.1 Implement validation functions in lib/validation.ts + - Create validation functions: `validateVMID()`, `validateHostname()`, `validateMemory()`, `validateRequired()`, `validateNumericRange()`, `validateStringPattern()` + - Each function returns error message string or null + - _Requirements: 11.1, 11.2, 11.3, 11.4, 11.5_ + + - [x] 2.2 Implement generic form validation utility + - Create `validateForm()` function that accepts data and validation rules, returns ValidationResult with errors object + - Support validation types: required, number (min/max), string (minLength/maxLength/pattern) + - _Requirements: 11.1, 11.6_ + + - [x] 2.3 Write property tests for validation utilities + - **Property 8: Form Validation Completeness** + - **Property 9: Valid Form Enables Submission** + - **Validates: Requirements 11.1, 11.2, 11.3, 11.4, 11.5, 11.6** + +- [x] 3. 
Implement ProvisionPage component + - [x] 3.1 Create ProvisionPage.svelte with integration discovery + - Create `frontend/src/pages/ProvisionPage.svelte` with state management using Svelte 5 runes + - Implement `fetchIntegrations()` to query `/api/integrations/provisioning` + - Display loading state, error state, and integration list + - Filter and display only integrations with at least one capability + - _Requirements: 1.2, 1.4, 2.1, 2.2, 2.3, 2.4_ + + - [x] 3.2 Add integration selector for multiple integrations + - Implement tab or dropdown selector when multiple integrations are available + - Default to first available integration + - _Requirements: 1.4, 2.2_ + + - [x] 3.3 Write unit tests for ProvisionPage + - Test integration discovery, loading states, error handling, empty states + - _Requirements: 1.2, 1.4, 2.1, 2.3, 2.4_ + + - [x] 3.4 Write property tests for ProvisionPage + - **Property 1: Integration Discovery and Display** + - **Property 16: Integration Extensibility** + - **Validates: Requirements 1.4, 2.2, 2.3, 13.3** + +- [x] 4. Checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. + +- [x] 5. 
Implement ProxmoxProvisionForm component + - [x] 5.1 Create ProxmoxProvisionForm.svelte with tabbed interface + - Create `frontend/src/components/ProxmoxProvisionForm.svelte` with VM and LXC tabs + - Implement state management for activeTab, formData, validationErrors, submitting + - Add tab switching between 'vm' and 'lxc' modes + - _Requirements: 3.1, 4.1_ + + - [x] 5.2 Implement VM creation form + - Add form fields: vmid (required), name (required), node (required), cores, memory, sockets, cpu, scsi0, ide2, net0, ostype + - Implement real-time validation using validation utilities + - Display validation errors inline below each field + - Disable submit button when validation fails or submission in progress + - _Requirements: 3.2, 3.3, 3.6, 11.1, 11.2, 11.6_ + + - [x] 5.3 Implement LXC creation form + - Add form fields: vmid (required), hostname (required), node (required), ostemplate (required), cores, memory, rootfs, net0, password + - Implement real-time validation using validation utilities + - Display validation errors inline below each field + - Disable submit button when validation fails or submission in progress + - _Requirements: 4.2, 4.3, 4.6, 11.1, 11.2, 11.6_ + + - [x] 5.4 Implement form submission handlers + - Create `submitVMForm()` and `submitLXCForm()` functions + - Call appropriate API methods with form data + - Handle success: display success notification with VM/LXC ID, reset form + - Handle errors: display error notification with backend message + - Show loading indicator during submission + - _Requirements: 3.3, 3.4, 3.5, 3.6, 4.3, 4.4, 4.5, 4.6, 12.1, 12.2, 12.5_ + + - [x] 5.5 Write unit tests for ProxmoxProvisionForm + - Test form rendering, tab switching, field validation, submission success/error handling + - _Requirements: 3.1, 3.2, 4.1, 4.2, 11.1_ + + - [x] 5.6 Write property tests for form validation + - **Property 8: Form Validation Completeness** + - **Property 9: Valid Form Enables Submission** + - **Validates: Requirements 11.1, 
11.6** + +- [x] 6. Implement ManageTab component for node lifecycle actions + - [x] 6.1 Create ManageTab.svelte component + - Create `frontend/src/components/ManageTab.svelte` with state management for availableActions, nodeStatus, actionInProgress, confirmDialog + - Accept props: nodeId, nodeType, currentStatus + - Implement `fetchAvailableActions()` to query backend for permitted actions + - _Requirements: 5.1, 5.2, 5.3, 5.4_ + + - [x] 6.2 Implement action button rendering with availability logic + - Define actionAvailability mapping (start: ['stopped'], stop: ['running'], etc.) + - Render action buttons only when node state matches availableWhen conditions + - Display "no actions available" message when appropriate + - _Requirements: 5.5, 6.1, 6.2, 6.3_ + + - [x] 6.3 Implement action execution handlers + - Create `executeAction(action: string)` function + - Call API with action name and node identifier + - Disable all buttons and show loading indicator during execution + - Handle success: display success notification, refresh node status + - Handle errors: display error notification with backend message + - _Requirements: 6.4, 6.5, 6.6, 6.7, 12.1, 12.2, 12.5_ + + - [x] 6.4 Implement confirmation dialog for destructive actions + - Create confirmation dialog component for destroy actions + - Show VM/LXC identifier in confirmation message + - Handle confirm: execute destroy action, navigate away on success + - Handle cancel: close dialog, take no action + - _Requirements: 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 8.1, 8.2, 8.3, 8.4, 8.5, 8.6_ + + - [x] 6.5 Write unit tests for ManageTab + - Test action button rendering, availability logic, execution handlers, confirmation dialogs + - _Requirements: 5.1, 5.2, 6.1, 6.2, 6.3, 7.1, 8.1_ + + - [x] 6.6 Write property tests for ManageTab + - **Property 3: Action Button Availability** + - **Property 5: Successful Action Handling** + - **Property 6: Failed Action Error Display** + - **Property 7: Loading State During Actions** + - 
**Property 17: Dynamic Action Rendering** + - **Validates: Requirements 6.1, 6.2, 6.3, 6.5, 6.6, 6.7, 13.5** + +- [x] 7. Checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. + +- [x] 8. Integrate ManageTab into NodeDetailPage + - [x] 8.1 Add ManageTab to NodeDetailPage.svelte + - Import ManageTab component + - Add "Manage" tab to existing tab navigation + - Pass nodeId, nodeType, and currentStatus props to ManageTab + - _Requirements: 5.1, 5.2_ + + - [x] 8.2 Write integration tests for NodeDetailPage with ManageTab + - Test tab navigation, prop passing, action execution flow + - _Requirements: 5.1, 5.2_ + +- [x] 9. Implement ProxmoxSetupGuide component + - [x] 9.1 Create ProxmoxSetupGuide.svelte configuration form + - Create `frontend/src/components/ProxmoxSetupGuide.svelte` with state management for config, testResult, saving + - Add form fields: host (required), port (required, 1-65535), username, password, realm, token, ssl.rejectUnauthorized + - Implement validation: host (valid hostname/IP), port (numeric range), authentication (username+password+realm OR token) + - Display warning when ssl.rejectUnauthorized is false + - _Requirements: 10.1, 10.2, 10.3_ + + - [x] 9.2 Implement connection test functionality + - Add "Test Connection" button + - Call `testProxmoxConnection()` API method with current config + - Display test result (success/failure) with message + - _Requirements: 10.7_ + + - [x] 9.3 Implement configuration save handler + - Create `saveConfiguration()` function + - Validate all required fields before submission + - Call `saveProxmoxConfig()` API method + - Handle success: display success notification + - Handle errors: display error notification with backend message + - _Requirements: 10.3, 10.4, 10.5, 10.6_ + + - [x] 9.4 Write unit tests for ProxmoxSetupGuide + - Test form rendering, validation, connection test, save handler + - _Requirements: 10.1, 10.2, 10.3, 10.7_ + + - [x] 9.5 Write property tests for 
configuration validation + - **Property 10: Configuration Validation** + - **Validates: Requirements 10.3** + +- [x] 10. Update navigation and routing + - [x] 10.1 Add Provision route to Router.svelte + - Add route: '/provision': { component: ProvisionPage, requiresAuth: true } + - _Requirements: 1.2_ + + - [x] 10.2 Add Provision menu item to Navigation.svelte + - Add "Provision" link to top menu with icon + - Conditionally render based on user provisioning permissions + - Hide menu item if user lacks provisioning permissions + - _Requirements: 1.1, 1.3, 9.2, 9.3_ + + - [x] 10.3 Add permission check utility + - Create `hasProvisioningPermission()` function in auth context + - Check user permissions from auth manager + - _Requirements: 1.3, 9.1, 9.2, 9.3_ + + - [x] 10.4 Write unit tests for navigation updates + - Test route registration, menu item rendering, permission checks + - _Requirements: 1.1, 1.2, 1.3_ + + - [x] 10.5 Write property tests for permission-based UI visibility + - **Property 2: Permission-Based UI Visibility** + - **Validates: Requirements 1.3, 5.4, 9.2, 9.3** + +- [x] 11. 
Implement notification system enhancements + - [x] 11.1 Add error notification with expandable details + - Enhance toast notification to support expandable error details section + - Display main error message prominently + - Show additional details in collapsible section when available + - _Requirements: 12.1, 12.2, 12.3_ + + - [x] 11.2 Implement notification persistence logic + - Error notifications: remain visible until user dismisses + - Success notifications: auto-dismiss after exactly 5 seconds + - _Requirements: 12.6, 12.7_ + + - [x] 11.3 Add error logging to console + - Log all errors to browser console with context + - Include error type, message, stack trace, and operation context + - _Requirements: 12.4_ + + - [x] 11.4 Write unit tests for notification system + - Test error display, success display, auto-dismiss timing, expandable details + - _Requirements: 12.1, 12.2, 12.3, 12.6, 12.7_ + + - [x] 11.5 Write property tests for notification behavior + - **Property 11: Error Notification Persistence** + - **Property 12: Success Notification Auto-Dismiss** + - **Property 13: Error Details Expandability** + - **Property 14: Error Logging** + - **Validates: Requirements 12.1, 12.3, 12.4, 12.6, 12.7** + +- [x] 12. Checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. + +- [x] 13. Implement dynamic form generation utilities + - [x] 13.1 Create form field generator from capability metadata + - Create `frontend/src/lib/formGenerator.ts` with `generateFormFields()` function + - Accept CapabilityParameter[] and return form field configuration + - Map parameter types to appropriate input types + - Apply validation rules from parameter metadata + - _Requirements: 13.1, 13.4_ + + - [x] 13.2 Write property tests for dynamic form generation + - **Property 15: Dynamic Form Generation** + - **Validates: Requirements 13.1, 13.4** + +- [x] 14. 
Create custom fast-check generators for property tests + - [x] 14.1 Implement test data generators + - Create `frontend/src/__tests__/generators.ts` with custom arbitraries + - Implement: `integrationArbitrary()`, `capabilityParameterArbitrary()`, `permissionsArbitrary()`, `nodeStateArbitrary()` + - Configure generators to produce realistic test data + - _Requirements: Testing strategy_ + + - [x] 14.2 Write tests for generators + - Verify generators produce valid data structures + - _Requirements: Testing strategy_ + +- [x] 15. Integration and wiring + - [x] 15.1 Wire all components together + - Verify ProvisionPage renders ProxmoxProvisionForm correctly + - Verify NodeDetailPage renders ManageTab correctly + - Verify IntegrationSetupPage renders ProxmoxSetupGuide correctly + - Test navigation flow: menu → provision page → form submission → success + - Test management flow: node detail → manage tab → action execution → status refresh + - _Requirements: All requirements_ + + - [x] 15.2 Write end-to-end integration tests + - Test complete provisioning flow from navigation to VM creation + - Test complete management flow from node detail to action execution + - Test error handling across component boundaries + - _Requirements: All requirements_ + +- [x] 16. 
Update documentation + - [x] 16.1 Create user guide for Provision page + - Document how to access and use the Provision page + - Include screenshots of VM and LXC creation forms + - Explain form fields and validation requirements + - _Requirements: 14.1, 14.4_ + + - [x] 16.2 Create Proxmox integration setup guide + - Document configuration steps for Proxmox integration + - Include connection test instructions + - Explain authentication options (username/password vs token) + - _Requirements: 14.2_ + + - [x] 16.3 Document permissions and RBAC + - List required permissions for each provisioning action + - Explain how permissions affect UI visibility + - _Requirements: 14.3_ + + - [x] 16.4 Create Manage tab usage guide + - Document lifecycle operations available in Manage tab + - Explain action availability based on node state + - Include screenshots of action buttons and confirmation dialogs + - _Requirements: 14.5_ + + - [x] 16.5 Add troubleshooting section + - Document common provisioning errors and solutions + - Include API error codes and meanings + - Provide debugging tips + - _Requirements: 14.6_ + +- [x] 17. Final checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. 
+ +## Notes + +- Tasks marked with `*` are optional and can be skipped for faster MVP +- Each task references specific requirements for traceability +- Checkpoints ensure incremental validation +- Property tests validate universal correctness properties (17 properties total) +- Unit tests validate specific examples and edge cases +- The implementation uses Svelte 5 with runes-based reactivity and TypeScript +- All API interactions use the existing api.ts client with proper retry logic +- RBAC is enforced at both backend and frontend levels +- The design is integration-agnostic to support future provisioning integrations diff --git a/.kiro/specs/090/proxmox-integration/.config.kiro b/.kiro/specs/090/proxmox-integration/.config.kiro new file mode 100644 index 00000000..2f35c2e8 --- /dev/null +++ b/.kiro/specs/090/proxmox-integration/.config.kiro @@ -0,0 +1 @@ +{"specId": "14090baf-461c-4a8b-a016-ce48ca39edfc", "workflowType": "requirements-first", "specType": "feature"} diff --git a/.kiro/specs/090/proxmox-integration/design.md b/.kiro/specs/090/proxmox-integration/design.md new file mode 100644 index 00000000..eff2d003 --- /dev/null +++ b/.kiro/specs/090/proxmox-integration/design.md @@ -0,0 +1,1975 @@ +# Proxmox Integration Design Document + +## Overview + +This document describes the design for integrating Proxmox Virtual Environment (VE) into Pabawi. The integration follows the established plugin architecture pattern used by existing integrations (PuppetDB, Bolt, Ansible, SSH) and introduces a new "provisioning" capability type to enable VM and container lifecycle management. 
+ +### Key Components + +- **ProxmoxIntegration**: Plugin class that implements both InformationSourcePlugin and ExecutionToolPlugin interfaces +- **ProxmoxService**: Business logic layer that orchestrates API calls and data transformation +- **ProxmoxClient**: Low-level HTTP client for Proxmox API communication with authentication and retry logic +- **ProvisioningCapability**: New capability interface for infrastructure provisioning operations + +### Design Goals + +1. Follow existing plugin architecture patterns for consistency +2. Support both information retrieval (inventory, facts, groups) and execution (actions, provisioning) +3. Provide robust error handling and resilience through retry logic and circuit breakers +4. Enable efficient operations through caching and parallel API calls +5. Support both password and token-based authentication +6. Introduce provisioning capabilities for VM and container lifecycle management + +## Architecture + +### High-Level Component Diagram + +```mermaid +graph TB + IM[IntegrationManager] + PI[ProxmoxIntegration
BasePlugin] + PS[ProxmoxService] + PC[ProxmoxClient] + PVE[Proxmox VE API] + + IM -->|registers| PI + PI -->|delegates to| PS + PS -->|uses| PC + PC -->|HTTP/HTTPS| PVE + + PI -.->|implements| ISP[InformationSourcePlugin] + PI -.->|implements| ETP[ExecutionToolPlugin] + + PS -->|uses| Cache[SimpleCache] + PS -->|uses| Logger[LoggerService] + PS -->|uses| Perf[PerformanceMonitorService] +``` + +### Layer Responsibilities + +**ProxmoxIntegration (Plugin Layer)** + +- Extends BasePlugin +- Implements InformationSourcePlugin and ExecutionToolPlugin interfaces +- Handles plugin lifecycle (initialization, health checks) +- Delegates business logic to ProxmoxService +- Manages configuration validation + +**ProxmoxService (Business Logic Layer)** + +- Orchestrates API calls through ProxmoxClient +- Transforms Proxmox API responses to Pabawi data models +- Implements caching strategy for inventory, groups, and facts +- Handles data aggregation and grouping logic +- Manages provisioning operations (create/destroy VMs and containers) + +**ProxmoxClient (HTTP Client Layer)** + +- Handles HTTP/HTTPS communication with Proxmox API +- Manages authentication (ticket-based and token-based) +- Implements retry logic with exponential backoff +- Handles task polling for long-running operations +- Transforms HTTP errors into domain-specific exceptions + +### Data Flow Diagrams + +#### Inventory Retrieval Flow + +```mermaid +sequenceDiagram + participant IM as IntegrationManager + participant PI as ProxmoxIntegration + participant PS as ProxmoxService + participant Cache as SimpleCache + participant PC as ProxmoxClient + participant PVE as Proxmox API + + IM->>PI: getInventory() + PI->>PS: getInventory() + PS->>Cache: get("inventory:all") + alt Cache Hit + Cache-->>PS: cached nodes + PS-->>PI: Node[] + else Cache Miss + PS->>PC: query("/api2/json/cluster/resources", type=vm) + PC->>PVE: GET /api2/json/cluster/resources?type=vm + PVE-->>PC: guest data + PC-->>PS: raw response + PS->>PS: 
transform to Node[] + PS->>Cache: set("inventory:all", nodes, 60s) + PS-->>PI: Node[] + end + PI-->>IM: Node[] +``` + +#### VM Provisioning Flow + +```mermaid +sequenceDiagram + participant User + participant PI as ProxmoxIntegration + participant PS as ProxmoxService + participant PC as ProxmoxClient + participant PVE as Proxmox API + + User->>PI: executeAction({type: "provision", action: "create_vm", params}) + PI->>PS: createVM(params) + PS->>PC: post("/api2/json/nodes/{node}/qemu", params) + PC->>PVE: POST /api2/json/nodes/{node}/qemu + PVE-->>PC: {data: "UPID:..."} + PC->>PC: waitForTask(taskId) + loop Poll every 2s + PC->>PVE: GET /api2/json/nodes/{node}/tasks/{upid}/status + PVE-->>PC: {status: "running"} + end + PVE-->>PC: {status: "stopped", exitstatus: "OK"} + PC-->>PS: success + PS->>Cache: clear inventory cache + PS-->>PI: ExecutionResult{success: true, vmid} + PI-->>User: ExecutionResult +``` + +#### Authentication Flow + +```mermaid +sequenceDiagram + participant PS as ProxmoxService + participant PC as ProxmoxClient + participant PVE as Proxmox API + + PS->>PC: initialize(config) + alt Token Authentication + PC->>PC: store token + Note over PC: Use token in Authorization header + else Password Authentication + PC->>PVE: POST /api2/json/access/ticket + Note over PC,PVE: {username, password, realm} + PVE-->>PC: {ticket, CSRFPreventionToken} + PC->>PC: store ticket & CSRF token + end + + PS->>PC: query(endpoint) + PC->>PVE: GET endpoint (with auth) + alt Ticket Expired (401) + PVE-->>PC: 401 Unauthorized + PC->>PVE: POST /api2/json/access/ticket + PVE-->>PC: new ticket + PC->>PVE: GET endpoint (with new ticket) + PVE-->>PC: success + else Success + PVE-->>PC: data + end + PC-->>PS: data +``` + +## Components and Interfaces + +### ProxmoxIntegration Class + +**File**: `pabawi/backend/src/integrations/proxmox/ProxmoxIntegration.ts` + +```typescript +export class ProxmoxIntegration extends BasePlugin + implements InformationSourcePlugin, 
ExecutionToolPlugin {

  type = "both" as const;
  private service: ProxmoxService;

  constructor(logger?: LoggerService, performanceMonitor?: PerformanceMonitorService) {
    super("proxmox", "both", logger, performanceMonitor);
  }

  protected async performInitialization(): Promise<void> {
    // Extract and validate Proxmox configuration
    const config = this.config.config as ProxmoxConfig;
    this.validateProxmoxConfig(config);

    // Initialize service with configuration
    this.service = new ProxmoxService(config, this.logger, this.performanceMonitor);
    await this.service.initialize();
  }

  protected async performHealthCheck(): Promise<Record<string, unknown>> {
    return await this.service.healthCheck();
  }

  // InformationSourcePlugin methods
  async getInventory(): Promise<Node[]> {
    return await this.service.getInventory();
  }

  async getGroups(): Promise<NodeGroup[]> {
    return await this.service.getGroups();
  }

  async getNodeFacts(nodeId: string): Promise<Facts> {
    return await this.service.getNodeFacts(nodeId);
  }

  async getNodeData(nodeId: string, dataType: string): Promise<unknown> {
    return await this.service.getNodeData(nodeId, dataType);
  }

  // ExecutionToolPlugin methods
  async executeAction(action: Action): Promise<ExecutionResult> {
    return await this.service.executeAction(action);
  }

  listCapabilities(): Capability[] {
    return this.service.listCapabilities();
  }

  listProvisioningCapabilities(): ProvisioningCapability[] {
    return this.service.listProvisioningCapabilities();
  }

  private validateProxmoxConfig(config: ProxmoxConfig): void {
    // Validate host (hostname or IP)
    if (!config.host || typeof config.host !== 'string') {
      throw new Error('Proxmox configuration must include a valid host');
    }

    // Validate port range
    if (config.port && (config.port < 1 || config.port > 65535)) {
      throw new Error('Proxmox port must be between 1 and 65535');
    }

    // Validate authentication
    if (!config.token && !config.password) {
      throw new Error('Proxmox configuration must include
either token or password authentication');
    }

    // Validate realm for password auth
    if (config.password && !config.realm) {
      throw new Error('Proxmox password authentication requires a realm');
    }

    // Log security warning if cert verification disabled
    if (config.ssl?.rejectUnauthorized === false) {
      this.logger.warn('TLS certificate verification is disabled - this is insecure', {
        component: 'ProxmoxIntegration',
        operation: 'validateProxmoxConfig'
      });
    }
  }
}
```

### ProxmoxService Class

**File**: `pabawi/backend/src/integrations/proxmox/ProxmoxService.ts`

```typescript
export class ProxmoxService {
  private client: ProxmoxClient;
  private cache: SimpleCache;
  private logger: LoggerService;
  private performanceMonitor: PerformanceMonitorService;
  private config: ProxmoxConfig;

  constructor(
    config: ProxmoxConfig,
    logger: LoggerService,
    performanceMonitor: PerformanceMonitorService
  ) {
    this.config = config;
    this.logger = logger;
    this.performanceMonitor = performanceMonitor;
    this.cache = new SimpleCache({ ttl: 60000 }); // Default 60s
  }

  async initialize(): Promise<void> {
    this.client = new ProxmoxClient(this.config, this.logger);
    await this.client.authenticate();
  }

  async healthCheck(): Promise<Record<string, unknown>> {
    try {
      const version = await this.client.get('/api2/json/version');
      return {
        healthy: true,
        message: 'Proxmox API is reachable',
        details: { version }
      };
    } catch (error) {
      if (error instanceof ProxmoxAuthenticationError) {
        return {
          healthy: false,
          degraded: true,
          message: 'Authentication failed',
          details: { error: error.message }
        };
      }
      return {
        healthy: false,
        message: 'Proxmox API is unreachable',
        details: { error: error instanceof Error ?
error.message : String(error) } + }; + } + } + + async getInventory(): Promise { + const cacheKey = 'inventory:all'; + const cached = this.cache.get(cacheKey); + if (cached) return cached as Node[]; + + const complete = this.performanceMonitor.startTimer('proxmox:getInventory'); + + try { + // Query all cluster resources (VMs and containers) + const resources = await this.client.get('/api2/json/cluster/resources?type=vm'); + + if (!Array.isArray(resources)) { + throw new Error('Unexpected response format from Proxmox API'); + } + + const nodes = resources.map(guest => this.transformGuestToNode(guest)); + + this.cache.set(cacheKey, nodes, 60000); // Cache for 60s + complete({ cached: false, nodeCount: nodes.length }); + + return nodes; + } catch (error) { + complete({ error: error instanceof Error ? error.message : String(error) }); + throw error; + } + } + + async getGroups(): Promise { + const cacheKey = 'groups:all'; + const cached = this.cache.get(cacheKey); + if (cached) return cached as NodeGroup[]; + + const inventory = await this.getInventory(); + const groups: NodeGroup[] = []; + + // Group by node + const nodeGroups = this.groupByNode(inventory); + groups.push(...nodeGroups); + + // Group by status + const statusGroups = this.groupByStatus(inventory); + groups.push(...statusGroups); + + // Group by type (VM vs LXC) + const typeGroups = this.groupByType(inventory); + groups.push(...typeGroups); + + this.cache.set(cacheKey, groups, 60000); // Cache for 60s + + return groups; + } + + async getNodeFacts(nodeId: string): Promise { + const cacheKey = `facts:${nodeId}`; + const cached = this.cache.get(cacheKey); + if (cached) return cached as Facts; + + // Parse VMID from nodeId (format: "proxmox:{node}:{vmid}") + const vmid = this.parseVMID(nodeId); + const node = this.parseNodeName(nodeId); + + // Determine guest type and fetch configuration + const guestType = await this.getGuestType(node, vmid); + const endpoint = guestType === 'lxc' + ? 
`/api2/json/nodes/${node}/lxc/${vmid}/config` + : `/api2/json/nodes/${node}/qemu/${vmid}/config`; + + const config = await this.client.get(endpoint); + + // Fetch current status + const statusEndpoint = guestType === 'lxc' + ? `/api2/json/nodes/${node}/lxc/${vmid}/status/current` + : `/api2/json/nodes/${node}/qemu/${vmid}/status/current`; + + const status = await this.client.get(statusEndpoint); + + const facts = this.transformToFacts(config, status, guestType); + + this.cache.set(cacheKey, facts, 30000); // Cache for 30s + + return facts; + } + + async executeAction(action: Action): Promise { + const { type, target, action: actionName, parameters } = action; + + if (type === 'provision') { + return await this.executeProvisioningAction(actionName, parameters); + } + + // Handle lifecycle actions (start, stop, etc.) + return await this.executeLifecycleAction(target, actionName); + } + + listCapabilities(): Capability[] { + return [ + { + name: 'start', + description: 'Start a VM or container', + parameters: [] + }, + { + name: 'stop', + description: 'Force stop a VM or container', + parameters: [] + }, + { + name: 'shutdown', + description: 'Gracefully shutdown a VM or container', + parameters: [] + }, + { + name: 'reboot', + description: 'Reboot a VM or container', + parameters: [] + }, + { + name: 'suspend', + description: 'Suspend a VM', + parameters: [] + }, + { + name: 'resume', + description: 'Resume a suspended VM', + parameters: [] + } + ]; + } + + listProvisioningCapabilities(): ProvisioningCapability[] { + return [ + { + name: 'create_vm', + description: 'Create a new virtual machine', + operation: 'create', + parameters: [ + { name: 'vmid', type: 'number', required: true }, + { name: 'name', type: 'string', required: true }, + { name: 'node', type: 'string', required: true }, + { name: 'cores', type: 'number', required: false, default: 1 }, + { name: 'memory', type: 'number', required: false, default: 512 }, + { name: 'disk', type: 'string', required: 
false },
+        { name: 'network', type: 'object', required: false }
+      ]
+    },
+    {
+      name: 'create_lxc',
+      description: 'Create a new LXC container',
+      operation: 'create',
+      parameters: [
+        { name: 'vmid', type: 'number', required: true },
+        { name: 'name', type: 'string', required: true },
+        { name: 'node', type: 'string', required: true },
+        { name: 'ostemplate', type: 'string', required: true },
+        { name: 'cores', type: 'number', required: false, default: 1 },
+        { name: 'memory', type: 'number', required: false, default: 512 },
+        { name: 'rootfs', type: 'string', required: false },
+        { name: 'network', type: 'object', required: false }
+      ]
+    },
+    {
+      name: 'destroy_vm',
+      description: 'Destroy a virtual machine',
+      operation: 'destroy',
+      parameters: [
+        { name: 'vmid', type: 'number', required: true },
+        { name: 'node', type: 'string', required: true }
+      ]
+    },
+    {
+      name: 'destroy_lxc',
+      description: 'Destroy an LXC container',
+      operation: 'destroy',
+      parameters: [
+        { name: 'vmid', type: 'number', required: true },
+        { name: 'node', type: 'string', required: true }
+      ]
+    }
+  ];
+  }
+
+  async createVM(params: VMCreateParams): Promise<ExecutionResult> {
+    // Validate VMID is unique
+    const exists = await this.guestExists(params.node, params.vmid);
+    if (exists) {
+      return {
+        success: false,
+        error: `VM with VMID ${params.vmid} already exists on node ${params.node}`
+      };
+    }
+
+    // Call Proxmox API to create VM
+    const endpoint = `/api2/json/nodes/${params.node}/qemu`;
+    const taskId = await this.client.post(endpoint, params);
+
+    // Wait for task completion
+    await this.client.waitForTask(params.node, taskId);
+
+    // Clear inventory cache
+    this.cache.delete('inventory:all');
+    this.cache.delete('groups:all');
+
+    return {
+      success: true,
+      output: `VM ${params.vmid} created successfully`,
+      metadata: { vmid: params.vmid, node: params.node }
+    };
+  }
+
+  async createLXC(params: LXCCreateParams): Promise<ExecutionResult> {
+    // Similar to createVM but for LXC containers
+    const exists = await 
this.guestExists(params.node, params.vmid);
+    if (exists) {
+      return {
+        success: false,
+        error: `Container with VMID ${params.vmid} already exists on node ${params.node}`
+      };
+    }
+
+    const endpoint = `/api2/json/nodes/${params.node}/lxc`;
+    const taskId = await this.client.post(endpoint, params);
+
+    await this.client.waitForTask(params.node, taskId);
+
+    this.cache.delete('inventory:all');
+    this.cache.delete('groups:all');
+
+    return {
+      success: true,
+      output: `Container ${params.vmid} created successfully`,
+      metadata: { vmid: params.vmid, node: params.node }
+    };
+  }
+
+  async destroyGuest(node: string, vmid: number): Promise<ExecutionResult> {
+    // Check if guest exists
+    const exists = await this.guestExists(node, vmid);
+    if (!exists) {
+      return {
+        success: false,
+        error: `Guest ${vmid} not found on node ${node}`
+      };
+    }
+
+    // Determine guest type
+    const guestType = await this.getGuestType(node, vmid);
+
+    // Stop guest if running
+    const statusEndpoint = guestType === 'lxc'
+      ? `/api2/json/nodes/${node}/lxc/${vmid}/status/current`
+      : `/api2/json/nodes/${node}/qemu/${vmid}/status/current`;
+
+    const status = await this.client.get(statusEndpoint);
+    if (status.status === 'running') {
+      const stopEndpoint = guestType === 'lxc'
+        ? `/api2/json/nodes/${node}/lxc/${vmid}/status/stop`
+        : `/api2/json/nodes/${node}/qemu/${vmid}/status/stop`;
+
+      const stopTaskId = await this.client.post(stopEndpoint, {});
+      await this.client.waitForTask(node, stopTaskId);
+    }
+
+    // Delete guest
+    const deleteEndpoint = guestType === 'lxc'
+      ? 
`/api2/json/nodes/${node}/lxc/${vmid}` + : `/api2/json/nodes/${node}/qemu/${vmid}`; + + const deleteTaskId = await this.client.delete(deleteEndpoint); + await this.client.waitForTask(node, deleteTaskId); + + // Clear caches + this.cache.delete('inventory:all'); + this.cache.delete('groups:all'); + this.cache.delete(`facts:proxmox:${node}:${vmid}`); + + return { + success: true, + output: `Guest ${vmid} destroyed successfully` + }; + } + + clearCache(): void { + this.cache.clear(); + } + + // Private helper methods + private transformGuestToNode(guest: ProxmoxGuest): Node { /* ... */ } + private groupByNode(nodes: Node[]): NodeGroup[] { /* ... */ } + private groupByStatus(nodes: Node[]): NodeGroup[] { /* ... */ } + private groupByType(nodes: Node[]): NodeGroup[] { /* ... */ } + private parseVMID(nodeId: string): number { /* ... */ } + private parseNodeName(nodeId: string): string { /* ... */ } + private async getGuestType(node: string, vmid: number): Promise<'qemu' | 'lxc'> { /* ... */ } + private transformToFacts(config: unknown, status: unknown, type: string): Facts { /* ... */ } + private async executeProvisioningAction(action: string, params: unknown): Promise { /* ... */ } + private async executeLifecycleAction(target: string, action: string): Promise { /* ... */ } + private async guestExists(node: string, vmid: number): Promise { /* ... 
*/ } +} +``` + +### ProxmoxClient Class + +**File**: `pabawi/backend/src/integrations/proxmox/ProxmoxClient.ts` + +```typescript +export class ProxmoxClient { + private baseUrl: string; + private config: ProxmoxConfig; + private logger: LoggerService; + private ticket?: string; + private csrfToken?: string; + private httpsAgent?: https.Agent; + private retryConfig: RetryConfig; + + constructor(config: ProxmoxConfig, logger: LoggerService) { + this.config = config; + this.logger = logger; + this.baseUrl = `https://${config.host}:${config.port || 8006}`; + + // Configure HTTPS agent + if (config.ssl) { + this.httpsAgent = this.createHttpsAgent(config.ssl); + } + + // Configure retry logic + this.retryConfig = { + maxAttempts: 3, + initialDelay: 1000, + maxDelay: 10000, + backoffMultiplier: 2, + retryableErrors: ['ECONNRESET', 'ETIMEDOUT', 'ENOTFOUND'] + }; + } + + async authenticate(): Promise { + if (this.config.token) { + // Token authentication - no need to fetch ticket + this.logger.info('Using token authentication', { + component: 'ProxmoxClient', + operation: 'authenticate' + }); + return; + } + + // Password authentication - fetch ticket + const endpoint = '/api2/json/access/ticket'; + const params = { + username: `${this.config.username}@${this.config.realm}`, + password: this.config.password + }; + + try { + const response = await this.request('POST', endpoint, params, false); + this.ticket = response.data.ticket; + this.csrfToken = response.data.CSRFPreventionToken; + + this.logger.info('Authentication successful', { + component: 'ProxmoxClient', + operation: 'authenticate' + }); + } catch (error) { + throw new ProxmoxAuthenticationError( + 'Failed to authenticate with Proxmox API', + error + ); + } + } + + async get(endpoint: string): Promise { + return await this.requestWithRetry('GET', endpoint); + } + + async post(endpoint: string, data: unknown): Promise { + const response = await this.requestWithRetry('POST', endpoint, data); + // Proxmox returns task 
ID (UPID) for async operations
+    return response.data as string;
+  }
+
+  async delete(endpoint: string): Promise<string> {
+    const response = await this.requestWithRetry('DELETE', endpoint);
+    return response.data as string;
+  }
+
+  async waitForTask(
+    node: string,
+    taskId: string,
+    timeout: number = 300000
+  ): Promise<void> {
+    const startTime = Date.now();
+    const pollInterval = 2000; // 2 seconds
+
+    while (Date.now() - startTime < timeout) {
+      const endpoint = `/api2/json/nodes/${node}/tasks/${taskId}/status`;
+      const status = await this.get(endpoint);
+
+      if (status.status === 'stopped') {
+        if (status.exitstatus === 'OK') {
+          return;
+        } else {
+          throw new ProxmoxError(
+            `Task failed: ${status.exitstatus}`,
+            'TASK_FAILED',
+            status
+          );
+        }
+      }
+
+      await this.sleep(pollInterval);
+    }
+
+    throw new ProxmoxError(
+      `Task timeout after ${timeout}ms`,
+      'TASK_TIMEOUT',
+      { taskId, node }
+    );
+  }
+
+  private async requestWithRetry(
+    method: string,
+    endpoint: string,
+    data?: unknown
+  ): Promise<any> {
+    let lastError: Error | undefined;
+
+    for (let attempt = 1; attempt <= this.retryConfig.maxAttempts; attempt++) {
+      try {
+        return await this.request(method, endpoint, data);
+      } catch (error) {
+        lastError = error instanceof Error ? 
error : new Error(String(error)); + + // Don't retry authentication errors + if (error instanceof ProxmoxAuthenticationError) { + throw error; + } + + // Don't retry 4xx errors except 429 + if (error instanceof ProxmoxError && error.code.startsWith('HTTP_4')) { + if (error.code !== 'HTTP_429') { + throw error; + } + // Handle rate limiting + const retryAfter = error.details?.retryAfter || 5000; + await this.sleep(retryAfter); + continue; + } + + // Check if error is retryable + const isRetryable = this.retryConfig.retryableErrors.some( + errCode => lastError?.message.includes(errCode) + ); + + if (!isRetryable || attempt === this.retryConfig.maxAttempts) { + throw error; + } + + // Calculate backoff delay + const delay = Math.min( + this.retryConfig.initialDelay * Math.pow(this.retryConfig.backoffMultiplier, attempt - 1), + this.retryConfig.maxDelay + ); + + this.logger.warn(`Request failed, retrying (attempt ${attempt}/${this.retryConfig.maxAttempts})`, { + component: 'ProxmoxClient', + operation: 'requestWithRetry', + metadata: { endpoint, attempt, delay } + }); + + await this.sleep(delay); + } + } + + throw lastError; + } + + private async request( + method: string, + endpoint: string, + data?: unknown, + useAuth: boolean = true + ): Promise { + const url = `${this.baseUrl}${endpoint}`; + const headers: Record = { + 'Content-Type': 'application/json' + }; + + // Add authentication + if (useAuth) { + if (this.config.token) { + headers['Authorization'] = `PVEAPIToken=${this.config.token}`; + } else if (this.ticket) { + headers['Cookie'] = `PVEAuthCookie=${this.ticket}`; + if (method !== 'GET' && this.csrfToken) { + headers['CSRFPreventionToken'] = this.csrfToken; + } + } + } + + try { + const response = await this.fetchWithTimeout(url, { + method, + headers, + body: data ? 
JSON.stringify(data) : undefined, + agent: this.httpsAgent + }); + + return await this.handleResponse(response); + } catch (error) { + // Handle ticket expiration + if (error instanceof ProxmoxAuthenticationError && this.ticket) { + this.logger.info('Authentication ticket expired, re-authenticating', { + component: 'ProxmoxClient', + operation: 'request' + }); + await this.authenticate(); + // Retry request with new ticket + return await this.request(method, endpoint, data, useAuth); + } + throw error; + } + } + + private async handleResponse(response: Response): Promise { + // Handle authentication errors + if (response.status === 401 || response.status === 403) { + throw new ProxmoxAuthenticationError( + 'Authentication failed', + { status: response.status } + ); + } + + // Handle not found + if (response.status === 404) { + throw new ProxmoxError( + 'Resource not found', + 'HTTP_404', + { status: response.status } + ); + } + + // Handle other errors + if (!response.ok) { + const errorText = await response.text(); + throw new ProxmoxError( + `Proxmox API error: ${response.statusText}`, + `HTTP_${response.status}`, + { + status: response.status, + statusText: response.statusText, + body: errorText + } + ); + } + + // Parse JSON response + const json = await response.json(); + return json.data; // Proxmox wraps responses in {data: ...} + } + + private createHttpsAgent(sslConfig: ProxmoxSSLConfig): https.Agent { + const agentOptions: https.AgentOptions = { + rejectUnauthorized: sslConfig.rejectUnauthorized ?? 
true + }; + + if (sslConfig.ca) { + agentOptions.ca = fs.readFileSync(sslConfig.ca); + } + + if (sslConfig.cert) { + agentOptions.cert = fs.readFileSync(sslConfig.cert); + } + + if (sslConfig.key) { + agentOptions.key = fs.readFileSync(sslConfig.key); + } + + return new https.Agent(agentOptions); + } + + private async fetchWithTimeout( + url: string, + options: RequestInit & { agent?: https.Agent }, + timeout: number = 30000 + ): Promise { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), timeout); + + try { + const response = await fetch(url, { + ...options, + signal: controller.signal + }); + return response; + } finally { + clearTimeout(timeoutId); + } + } + + private sleep(ms: number): Promise { + return new Promise(resolve => setTimeout(resolve, ms)); + } +} +``` + +## Data Models + +### Type Definitions + +**File**: `pabawi/backend/src/integrations/proxmox/types.ts` + +```typescript +/** + * Proxmox configuration + */ +export interface ProxmoxConfig { + host: string; + port?: number; + username?: string; + password?: string; + realm?: string; + token?: string; + ssl?: ProxmoxSSLConfig; + timeout?: number; +} + +/** + * SSL configuration for Proxmox client + */ +export interface ProxmoxSSLConfig { + rejectUnauthorized?: boolean; + ca?: string; + cert?: string; + key?: string; +} + +/** + * Proxmox guest (VM or LXC) from API + */ +export interface ProxmoxGuest { + vmid: number; + name: string; + node: string; + type: 'qemu' | 'lxc'; + status: 'running' | 'stopped' | 'paused'; + maxmem?: number; + maxdisk?: number; + cpus?: number; + uptime?: number; + netin?: number; + netout?: number; + diskread?: number; + diskwrite?: number; +} + +/** + * Proxmox guest configuration + */ +export interface ProxmoxGuestConfig { + vmid: number; + name: string; + cores: number; + memory: number; + sockets?: number; + cpu?: string; + bootdisk?: string; + scsihw?: string; + net0?: string; + net1?: string; + ide2?: string; + [key: 
string]: unknown; +} + +/** + * Proxmox guest status + */ +export interface ProxmoxGuestStatus { + status: 'running' | 'stopped' | 'paused'; + vmid: number; + uptime?: number; + cpus?: number; + maxmem?: number; + mem?: number; + maxdisk?: number; + disk?: number; + netin?: number; + netout?: number; + diskread?: number; + diskwrite?: number; +} + +/** + * VM creation parameters + */ +export interface VMCreateParams { + vmid: number; + name: string; + node: string; + cores?: number; + memory?: number; + sockets?: number; + cpu?: string; + scsi0?: string; + ide2?: string; + net0?: string; + ostype?: string; + [key: string]: unknown; +} + +/** + * LXC creation parameters + */ +export interface LXCCreateParams { + vmid: number; + hostname: string; + node: string; + ostemplate: string; + cores?: number; + memory?: number; + rootfs?: string; + net0?: string; + password?: string; + [key: string]: unknown; +} + +/** + * Proxmox task status + */ +export interface ProxmoxTaskStatus { + status: 'running' | 'stopped'; + exitstatus?: string; + type: string; + node: string; + pid: number; + pstart: number; + starttime: number; + upid: string; +} + +/** + * Provisioning capability interface + */ +export interface ProvisioningCapability extends Capability { + operation: 'create' | 'destroy'; +} + +/** + * Retry configuration + */ +export interface RetryConfig { + maxAttempts: number; + initialDelay: number; + maxDelay: number; + backoffMultiplier: number; + retryableErrors: string[]; +} + +/** + * Proxmox error classes + */ +export class ProxmoxError extends Error { + constructor( + message: string, + public code: string, + public details?: unknown + ) { + super(message); + this.name = 'ProxmoxError'; + } +} + +export class ProxmoxAuthenticationError extends ProxmoxError { + constructor(message: string, details?: unknown) { + super(message, 'PROXMOX_AUTH_ERROR', details); + this.name = 'ProxmoxAuthenticationError'; + } +} + +export class ProxmoxConnectionError extends 
ProxmoxError { + constructor(message: string, details?: unknown) { + super(message, 'PROXMOX_CONNECTION_ERROR', details); + this.name = 'ProxmoxConnectionError'; + } +} +``` + +### API Endpoint Mappings + +| Operation | HTTP Method | Endpoint | Description | +|-----------|-------------|----------|-------------| +| Get version | GET | `/api2/json/version` | Get Proxmox VE version | +| Authenticate | POST | `/api2/json/access/ticket` | Get authentication ticket | +| List resources | GET | `/api2/json/cluster/resources?type=vm` | List all VMs and containers | +| Get VM config | GET | `/api2/json/nodes/{node}/qemu/{vmid}/config` | Get VM configuration | +| Get LXC config | GET | `/api2/json/nodes/{node}/lxc/{vmid}/config` | Get container configuration | +| Get VM status | GET | `/api2/json/nodes/{node}/qemu/{vmid}/status/current` | Get VM current status | +| Get LXC status | GET | `/api2/json/nodes/{node}/lxc/{vmid}/status/current` | Get container current status | +| Start VM | POST | `/api2/json/nodes/{node}/qemu/{vmid}/status/start` | Start a VM | +| Start LXC | POST | `/api2/json/nodes/{node}/lxc/{vmid}/status/start` | Start a container | +| Stop VM | POST | `/api2/json/nodes/{node}/qemu/{vmid}/status/stop` | Force stop a VM | +| Stop LXC | POST | `/api2/json/nodes/{node}/lxc/{vmid}/status/stop` | Force stop a container | +| Shutdown VM | POST | `/api2/json/nodes/{node}/qemu/{vmid}/status/shutdown` | Graceful shutdown VM | +| Shutdown LXC | POST | `/api2/json/nodes/{node}/lxc/{vmid}/status/shutdown` | Graceful shutdown container | +| Reboot VM | POST | `/api2/json/nodes/{node}/qemu/{vmid}/status/reboot` | Reboot a VM | +| Reboot LXC | POST | `/api2/json/nodes/{node}/lxc/{vmid}/status/reboot` | Reboot a container | +| Suspend VM | POST | `/api2/json/nodes/{node}/qemu/{vmid}/status/suspend` | Suspend a VM | +| Resume VM | POST | `/api2/json/nodes/{node}/qemu/{vmid}/status/resume` | Resume a VM | +| Create VM | POST | `/api2/json/nodes/{node}/qemu` | Create a new VM | 
+| Create LXC | POST | `/api2/json/nodes/{node}/lxc` | Create a new container | +| Delete VM | DELETE | `/api2/json/nodes/{node}/qemu/{vmid}` | Delete a VM | +| Delete LXC | DELETE | `/api2/json/nodes/{node}/lxc/{vmid}` | Delete a container | +| Get task status | GET | `/api2/json/nodes/{node}/tasks/{upid}/status` | Get task status | + +### New Type Definition in Core Types + +**File**: `pabawi/backend/src/integrations/types.ts` + +Add the following interface: + +```typescript +/** + * Provisioning capability for infrastructure creation/destruction + */ +export interface ProvisioningCapability extends Capability { + operation: 'create' | 'destroy'; +} +``` + +## Correctness Properties + +*A property is a characteristic or behavior that should hold true across all valid executions of a system—essentially, a formal statement about what the system should do. Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees.* + +### Property Reflection Analysis + +After analyzing all acceptance criteria, I identified the following redundancies and consolidations: + +**Redundancy Group 1: Configuration Validation** + +- Requirements 2.3, 2.4, 16.1, 16.2, 16.3, 16.5, 16.6 all relate to configuration validation +- These can be consolidated into comprehensive properties about invalid configuration rejection + +**Redundancy Group 2: Node Transformation** + +- Requirements 5.3, 5.4, 5.5, 5.6, 5.7 all relate to guest-to-node transformation +- These can be consolidated into properties about transformation correctness + +**Redundancy Group 3: Group ID Formatting** + +- Requirements 6.5, 6.6, 6.7 all relate to group ID format validation +- These can be consolidated into a single property about ID format correctness + +**Redundancy Group 4: Error Handling** + +- Requirements 14.1, 14.7 relate to general error handling +- Requirements 3.5, 7.7, 8.10, 10.7, 11.7, 12.7 relate to specific error cases +- These can be consolidated into 
properties about error message descriptiveness + +**Redundancy Group 5: Authentication** + +- Requirements 3.1, 3.2, 3.6 relate to authentication behavior +- These can be consolidated into properties about authentication correctness + +### Property 1: Configuration Validation Rejects Invalid Inputs + +*For any* configuration object with missing required fields (host, authentication credentials), invalid port numbers (outside 1-65535), invalid hostnames, or missing realm for password authentication, initialization should fail with a descriptive error indicating the specific validation failure. + +**Validates: Requirements 2.3, 2.4, 16.1, 16.2, 16.3, 16.5, 16.6** + +### Property 2: HTTPS Protocol Usage + +*For any* API call made by ProxmoxClient, the request URL should use the HTTPS protocol. + +**Validates: Requirements 3.6** + +### Property 3: Authentication Ticket Persistence + +*For any* authenticated ProxmoxClient using password authentication, subsequent API calls should reuse the stored authentication ticket without re-authenticating until the ticket expires. + +**Validates: Requirements 3.2** + +### Property 4: Guest-to-Node Transformation Completeness + +*For any* Proxmox guest object returned from the API, the transformed Node object should contain all required fields (id, name, status, metadata with node and type), the type field should correctly distinguish between 'qemu' and 'lxc', and IP addresses should be included when available or omitted (not null) when unavailable. + +**Validates: Requirements 5.3, 5.4, 5.5, 5.6, 5.7** + +### Property 5: Group Creation by Node + +*For any* set of guests distributed across multiple Proxmox nodes, calling getGroups() should create one NodeGroup per unique node, where each group contains exactly the guests on that node. 
+ +**Validates: Requirements 6.2** + +### Property 6: Group Creation by Status + +*For any* set of guests with various status values, calling getGroups() should create one NodeGroup per unique status, where each group contains exactly the guests with that status. + +**Validates: Requirements 6.3** + +### Property 7: Group Creation by Type + +*For any* set of guests containing both VMs and LXC containers, calling getGroups() should create exactly two type-based groups: one for VMs and one for LXC containers. + +**Validates: Requirements 6.4** + +### Property 8: Group ID Format Correctness + +*For any* NodeGroup created by the Proxmox integration, the group ID should follow the correct format: "proxmox:node:{nodename}" for node groups, "proxmox:status:{status}" for status groups, and "proxmox:type:{type}" for type groups. + +**Validates: Requirements 6.5, 6.6, 6.7** + +### Property 9: Facts Transformation Completeness + +*For any* guest configuration and status data from the Proxmox API, the transformed Facts object should include CPU, memory, disk, and network configuration fields, and should include current resource usage when the guest status is 'running'. + +**Validates: Requirements 7.4, 7.5, 7.6** + +### Property 10: Non-Existent Guest Error + +*For any* VMID that does not exist on a given node, calling getNodeFacts() or destroyGuest() should throw a descriptive error indicating the guest was not found. + +**Validates: Requirements 7.7, 12.7** + +### Property 11: Action Completion Waiting + +*For any* lifecycle action (start, stop, shutdown, reboot, suspend, resume) or provisioning action (create, destroy), the service should wait for the Proxmox task to complete before returning the result. + +**Validates: Requirements 8.9, 10.5, 11.5, 12.5** + +### Property 12: Failed Action Error Details + +*For any* action that fails (lifecycle or provisioning), the returned ExecutionResult should have success=false and include descriptive error details. 
+ +**Validates: Requirements 8.10, 10.7, 11.7** + +### Property 13: VMID Uniqueness Validation + +*For any* VM or LXC creation request with a VMID that already exists on the target node, the creation should fail with an error indicating the VMID is already in use. + +**Validates: Requirements 10.4, 11.4** + +### Property 14: Running Guest Stop Before Destruction + +*For any* guest that is in 'running' status, calling destroyGuest() should first stop the guest before deleting it. + +**Validates: Requirements 12.3** + +### Property 15: Task Completion Detection + +*For any* Proxmox task, calling waitForTask() should poll the task status endpoint until the task status is 'stopped', then return success if exitstatus is 'OK' or throw an error with the task's error message otherwise. + +**Validates: Requirements 13.4, 13.5** + +### Property 16: Custom Timeout Respect + +*For any* custom timeout value provided to waitForTask(), the method should use that timeout instead of the default 300 seconds. + +**Validates: Requirements 13.7** + +### Property 17: HTTP Error Transformation + +*For any* HTTP error response from the Proxmox API, ProxmoxClient should transform it into a descriptive ProxmoxError with an appropriate error code and details. + +**Validates: Requirements 14.1** + +### Property 18: Retry Logic for Transient Failures + +*For any* transient network failure (ECONNRESET, ETIMEDOUT, ENOTFOUND), ProxmoxClient should retry the request up to 3 times with exponential backoff. + +**Validates: Requirements 15.2** + +### Property 19: No Retry for Authentication Failures + +*For any* authentication failure (401, 403 except ticket expiration), ProxmoxClient should not retry the request and should immediately throw a ProxmoxAuthenticationError. + +**Validates: Requirements 15.3** + +### Property 20: No Retry for Client Errors + +*For any* 4xx HTTP error except 429 (rate limiting), ProxmoxClient should not retry the request and should immediately throw an error. 
+ +**Validates: Requirements 15.4** + +### Property 21: Error Logging + +*For any* error that occurs in ProxmoxService, the error should be logged using LoggerService with appropriate context including component name and operation. + +**Validates: Requirements 14.7** + +### Property 22: Inventory Cache Behavior + +*For any* two calls to getInventory() within 60 seconds, the second call should return cached results without querying the Proxmox API. + +**Validates: Requirements 20.1** + +### Property 23: Groups Cache Behavior + +*For any* two calls to getGroups() within 60 seconds, the second call should return cached results without querying the Proxmox API. + +**Validates: Requirements 20.2** + +### Property 24: Facts Cache Behavior + +*For any* two calls to getNodeFacts() for the same node within 30 seconds, the second call should return cached results without querying the Proxmox API. + +**Validates: Requirements 20.3** + +## Error Handling + +### Error Hierarchy + +``` +Error +├── ProxmoxError (base class for all Proxmox errors) +│ ├── ProxmoxAuthenticationError (401, 403 errors) +│ ├── ProxmoxConnectionError (network failures, timeouts) +│ └── ProxmoxTaskError (task execution failures) +``` + +### Error Handling Strategy + +**ProxmoxClient Layer** + +- Catches all HTTP errors and transforms them into domain-specific exceptions +- Maps HTTP status codes to appropriate error types: + - 401/403 → ProxmoxAuthenticationError + - 404 → ProxmoxError with HTTP_404 code + - 429 → ProxmoxError with HTTP_429 code (triggers retry with backoff) + - 5xx → ProxmoxError with HTTP_5xx code + - Network errors → ProxmoxConnectionError +- Implements automatic ticket refresh on 401 errors for password authentication +- Logs all errors with context before throwing + +**ProxmoxService Layer** + +- Catches errors from ProxmoxClient +- Adds business logic context to errors +- Logs errors with operation-specific details +- Transforms errors into ExecutionResult objects for action 
methods +- Re-throws errors for information retrieval methods (getInventory, getNodeFacts, etc.) + +**ProxmoxIntegration Layer** + +- Catches errors from ProxmoxService during health checks +- Returns degraded status for authentication errors +- Returns unhealthy status for other errors +- Allows errors to propagate for data retrieval and action execution + +### Error Response Examples + +**Authentication Failure** + +```typescript +throw new ProxmoxAuthenticationError( + 'Failed to authenticate with Proxmox API', + { + username: config.username, + realm: config.realm, + host: config.host + } +); +``` + +**Guest Not Found** + +```typescript +throw new ProxmoxError( + `Guest ${vmid} not found on node ${node}`, + 'GUEST_NOT_FOUND', + { vmid, node } +); +``` + +**Task Timeout** + +```typescript +throw new ProxmoxError( + `Task timeout after ${timeout}ms`, + 'TASK_TIMEOUT', + { taskId, node, timeout } +); +``` + +**VMID Already Exists** + +```typescript +return { + success: false, + error: `VM with VMID ${vmid} already exists on node ${node}`, + metadata: { vmid, node } +}; +``` + +### Retry Logic + +**Retryable Errors** + +- Network timeouts (ETIMEDOUT) +- Connection resets (ECONNRESET) +- DNS resolution failures (ENOTFOUND) +- 429 Rate Limiting (with Retry-After header respect) +- 5xx Server errors + +**Non-Retryable Errors** + +- Authentication failures (401, 403) +- Client errors (400, 404, etc.) 
+- Validation errors +- Resource not found errors + +**Retry Configuration** + +- Maximum attempts: 3 +- Initial delay: 1000ms +- Maximum delay: 10000ms +- Backoff multiplier: 2 (exponential backoff) +- Delay calculation: `min(initialDelay * (multiplier ^ attempt), maxDelay)` + +### Logging Strategy + +All errors are logged with structured context: + +```typescript +this.logger.error('Failed to create VM', { + component: 'ProxmoxService', + operation: 'createVM', + metadata: { + vmid: params.vmid, + node: params.node, + error: error.message + } +}, error); +``` + +## Testing Strategy + +### Dual Testing Approach + +The Proxmox integration requires both unit tests and property-based tests for comprehensive coverage: + +**Unit Tests** - Focus on: + +- Specific examples of successful operations +- Edge cases (empty responses, missing fields) +- Error conditions (network failures, auth failures) +- Integration points between components +- Mock Proxmox API responses + +**Property-Based Tests** - Focus on: + +- Universal properties across all inputs +- Data transformation correctness +- Configuration validation +- Caching behavior +- Retry logic +- Error handling patterns + +### Property-Based Testing Configuration + +**Testing Library**: Use `fast-check` for TypeScript property-based testing + +**Test Configuration**: + +- Minimum 100 iterations per property test +- Each test must reference its design document property +- Tag format: `Feature: proxmox-integration, Property {number}: {property_text}` + +**Example Property Test**: + +```typescript +import fc from 'fast-check'; + +describe('Proxmox Integration Properties', () => { + // Feature: proxmox-integration, Property 1: Configuration Validation Rejects Invalid Inputs + it('should reject configurations with invalid ports', () => { + fc.assert( + fc.property( + fc.integer({ min: -1000, max: 0 }).chain(n => + fc.constant(n).chain(port => + fc.record({ + host: fc.constant('proxmox.example.com'), + port: 
fc.constant(port), + username: fc.constant('root'), + password: fc.constant('password'), + realm: fc.constant('pam') + }) + ) + ), + (config) => { + const integration = new ProxmoxIntegration(); + expect(() => integration.initialize({ + enabled: true, + name: 'proxmox', + type: 'both', + config + })).toThrow(/port must be between 1 and 65535/); + } + ), + { numRuns: 100 } + ); + }); + + // Feature: proxmox-integration, Property 4: Guest-to-Node Transformation Completeness + it('should transform any guest to a complete Node object', () => { + fc.assert( + fc.property( + fc.record({ + vmid: fc.integer({ min: 100, max: 999999 }), + name: fc.string({ minLength: 1, maxLength: 50 }), + node: fc.string({ minLength: 1, maxLength: 20 }), + type: fc.constantFrom('qemu', 'lxc'), + status: fc.constantFrom('running', 'stopped', 'paused'), + maxmem: fc.option(fc.integer({ min: 0 })), + cpus: fc.option(fc.integer({ min: 1, max: 128 })) + }), + (guest) => { + const service = new ProxmoxService(mockConfig, mockLogger, mockPerfMonitor); + const node = service['transformGuestToNode'](guest); + + expect(node.id).toBeDefined(); + expect(node.name).toBe(guest.name); + expect(node.status).toBe(guest.status); + expect(node.metadata?.node).toBe(guest.node); + expect(node.metadata?.type).toBe(guest.type); + + // IP should be omitted (not null) when unavailable + if (node.ip !== undefined) { + expect(typeof node.ip).toBe('string'); + } + } + ), + { numRuns: 100 } + ); + }); +}); +``` + +### Unit Test Coverage Requirements + +**ProxmoxClient Tests**: + +- Authentication with password and token +- Ticket refresh on expiration +- HTTP error handling (401, 403, 404, 429, 5xx) +- Retry logic with exponential backoff +- Task polling and timeout +- Request/response transformation + +**ProxmoxService Tests**: + +- Inventory retrieval and caching +- Group creation (by node, status, type) +- Facts retrieval and caching +- VM creation and validation +- LXC creation and validation +- Guest destruction 
with stop-before-delete +- Action execution (start, stop, shutdown, etc.) +- Cache clearing + +**ProxmoxIntegration Tests**: + +- Plugin initialization +- Configuration validation +- Health check (healthy, degraded, unhealthy) +- Interface method delegation +- Capability listing + +### Test Data Generators + +Create reusable generators for property tests: + +```typescript +// Generator for valid Proxmox configurations +const validConfigArb = fc.record({ + host: fc.domain(), + port: fc.integer({ min: 1, max: 65535 }), + username: fc.string({ minLength: 1 }), + password: fc.string({ minLength: 1 }), + realm: fc.constantFrom('pam', 'pve') +}); + +// Generator for Proxmox guests +const guestArb = fc.record({ + vmid: fc.integer({ min: 100, max: 999999 }), + name: fc.string({ minLength: 1, maxLength: 50 }), + node: fc.string({ minLength: 1, maxLength: 20 }), + type: fc.constantFrom('qemu', 'lxc'), + status: fc.constantFrom('running', 'stopped', 'paused') +}); + +// Generator for VM creation parameters +const vmCreateParamsArb = fc.record({ + vmid: fc.integer({ min: 100, max: 999999 }), + name: fc.string({ minLength: 1, maxLength: 50 }), + node: fc.string({ minLength: 1, maxLength: 20 }), + cores: fc.integer({ min: 1, max: 128 }), + memory: fc.integer({ min: 512, max: 1048576 }) +}); +``` + +### Mock Strategy + +Use Jest mocks for external dependencies: + +```typescript +// Mock ProxmoxClient for ProxmoxService tests +jest.mock('./ProxmoxClient'); + +// Mock fetch for ProxmoxClient tests +global.fetch = jest.fn(); + +// Mock LoggerService +const mockLogger = { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + debug: jest.fn() +}; + +// Mock PerformanceMonitorService +const mockPerfMonitor = { + startTimer: jest.fn(() => jest.fn()) +}; +``` + +### Integration Test Considerations + +While unit and property tests cover most scenarios, integration tests against a real Proxmox instance would validate: + +- Actual API compatibility +- Network behavior +- 
Authentication flows
+- Task polling timing
+- Real-world error scenarios
+
+These should be run in a CI/CD environment with a test Proxmox cluster.
+
+## Performance Considerations
+
+### Caching Strategy
+
+**Cache Implementation**: Use `SimpleCache` utility (same as PuppetDB integration)
+
+**Cache TTLs**:
+
+- Inventory: 60 seconds (configurable)
+- Groups: 60 seconds (configurable)
+- Facts: 30 seconds (configurable)
+- Health checks: 30 seconds (inherited from BasePlugin)
+
+**Cache Keys**:
+
+- Inventory: `inventory:all`
+- Groups: `groups:all`
+- Facts: `facts:{nodeId}`
+
+**Cache Invalidation**:
+
+- Automatic expiration after TTL
+- Manual clearing via `clearCache()` method
+- Automatic clearing after provisioning operations (create/destroy)
+
+**Cache Benefits**:
+
+- Reduces load on Proxmox API
+- Improves response times for repeated queries
+- Prevents rate limiting issues
+- Reduces network latency impact
+
+### Parallel API Calls
+
+When fetching data for multiple guests, use `Promise.all()` to execute requests in parallel:
+
+```typescript
+async getMultipleGuestFacts(nodeIds: string[]): Promise<Facts[]> {
+  const factsPromises = nodeIds.map(nodeId => this.getNodeFacts(nodeId));
+  return await Promise.all(factsPromises);
+}
+```
+
+### Connection Pooling
+
+**HTTP Agent Configuration**:
+
+- Reuse HTTPS agent across requests
+- Configure keep-alive for connection reuse
+- Set appropriate timeout values
+
+```typescript
+const httpsAgent = new https.Agent({
+  keepAlive: true,
+  keepAliveMsecs: 30000,
+  maxSockets: 50,
+  maxFreeSockets: 10,
+  timeout: 30000
+});
+```
+
+### Performance Monitoring
+
+Use `PerformanceMonitorService` to track operation durations:
+
+```typescript
+async getInventory(): Promise<Node[]> {
+  const complete = this.performanceMonitor.startTimer('proxmox:getInventory');
+
+  try {
+    // ... 
operation logic + complete({ cached: false, nodeCount: nodes.length }); + return nodes; + } catch (error) { + complete({ error: error.message }); + throw error; + } +} +``` + +**Monitored Operations**: + +- `proxmox:getInventory` +- `proxmox:getGroups` +- `proxmox:getNodeFacts` +- `proxmox:executeAction` +- `proxmox:createVM` +- `proxmox:createLXC` +- `proxmox:destroyGuest` + +### Optimization Recommendations + +1. **Batch Operations**: When possible, use cluster-wide endpoints instead of per-node queries +2. **Selective Field Retrieval**: Only request needed fields from Proxmox API +3. **Lazy Loading**: Defer expensive operations until actually needed +4. **Debouncing**: For UI-triggered operations, implement debouncing to prevent excessive API calls +5. **Background Refresh**: Consider background cache refresh for frequently accessed data + +## Security Considerations + +### Authentication Security + +**Token Authentication (Recommended)**: + +- More secure than password authentication +- No credential transmission after initial setup +- Supports fine-grained permissions +- Can be easily revoked + +**Password Authentication**: + +- Requires secure credential storage +- Credentials transmitted during ticket acquisition +- Ticket has limited lifetime (2 hours by default) +- Automatic ticket refresh on expiration + +### TLS/SSL Configuration + +**Certificate Verification**: + +- Enabled by default (`rejectUnauthorized: true`) +- Log security warning when disabled +- Support custom CA certificates for self-signed certs + +**Secure Communication**: + +- All API calls use HTTPS +- No fallback to HTTP +- Support for client certificates + +### Credential Management + +**Configuration Storage**: + +- Never log passwords or tokens +- Store credentials in environment variables or secure vaults +- Use configuration encryption at rest + +**Error Messages**: + +- Sanitize error messages to avoid credential leakage +- Don't include passwords in error details +- Log 
authentication failures without sensitive data + +### Input Validation + +**Configuration Validation**: + +- Validate host format (prevent injection) +- Validate port range +- Validate required fields +- Sanitize user inputs + +**API Parameter Validation**: + +- Validate VMID format and range +- Validate node names +- Sanitize guest names and descriptions +- Prevent command injection in parameters + +### Rate Limiting + +**Client-Side Rate Limiting**: + +- Respect Retry-After headers +- Implement exponential backoff +- Limit concurrent requests + +**Error Handling**: + +- Don't expose internal system details in errors +- Log security-relevant events +- Monitor for suspicious patterns + +## Deployment Considerations + +### Configuration Example + +```typescript +// In pabawi configuration +{ + integrations: { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + priority: 10, + config: { + host: process.env.PROXMOX_HOST || 'proxmox.example.com', + port: parseInt(process.env.PROXMOX_PORT || '8006'), + token: process.env.PROXMOX_TOKEN, // Preferred + // OR password authentication: + // username: process.env.PROXMOX_USERNAME, + // password: process.env.PROXMOX_PASSWORD, + // realm: process.env.PROXMOX_REALM || 'pam', + ssl: { + rejectUnauthorized: process.env.PROXMOX_SSL_VERIFY !== 'false', + ca: process.env.PROXMOX_CA_CERT, + cert: process.env.PROXMOX_CLIENT_CERT, + key: process.env.PROXMOX_CLIENT_KEY + }, + timeout: 30000 + } + } + } +} +``` + +### Environment Variables + +```bash +# Required +PROXMOX_HOST=proxmox.example.com +PROXMOX_PORT=8006 + +# Token authentication (recommended) +PROXMOX_TOKEN=user@realm!tokenid=uuid + +# OR password authentication +PROXMOX_USERNAME=root +PROXMOX_PASSWORD=secret +PROXMOX_REALM=pam + +# Optional SSL configuration +PROXMOX_SSL_VERIFY=true +PROXMOX_CA_CERT=/path/to/ca.pem +PROXMOX_CLIENT_CERT=/path/to/client.pem +PROXMOX_CLIENT_KEY=/path/to/client-key.pem +``` + +### Proxmox API Token Setup + +To create an API token in 
Proxmox: + +1. Navigate to Datacenter → Permissions → API Tokens +2. Click "Add" to create a new token +3. Select user and enter token ID +4. Optionally disable "Privilege Separation" for full user permissions +5. Copy the generated token (format: `user@realm!tokenid=uuid`) +6. Set appropriate permissions for the token user + +**Recommended Permissions**: + +- VM.Allocate (for creating VMs) +- VM.Config.* (for configuring VMs) +- VM.PowerMgmt (for start/stop operations) +- VM.Audit (for reading VM information) +- Datastore.Allocate (for disk operations) + +### Health Check Integration + +The integration provides health check endpoints that can be monitored: + +```typescript +// Check Proxmox integration health +const health = await integrationManager.healthCheckAll(); +const proxmoxHealth = health.get('proxmox'); + +if (!proxmoxHealth.healthy) { + // Alert or take corrective action + logger.error('Proxmox integration unhealthy', { health: proxmoxHealth }); +} +``` + +### Monitoring and Alerting + +**Key Metrics to Monitor**: + +- Health check status +- API response times +- Cache hit rates +- Error rates by type +- Task completion times +- Authentication failures + +**Alerting Thresholds**: + +- Health check failures > 3 consecutive +- API response time > 5 seconds +- Error rate > 10% of requests +- Authentication failures > 5 per hour + +## Migration and Compatibility + +### Backward Compatibility + +This integration introduces new functionality without breaking existing integrations: + +1. **New Interface**: `ProvisioningCapability` extends `Capability` without modifying existing interfaces +2. **Optional Methods**: Provisioning methods are optional additions to ExecutionToolPlugin +3. 
**Independent Operation**: Proxmox integration operates independently of other integrations + +### Integration with Existing Systems + +**IntegrationManager Compatibility**: + +- Follows existing plugin registration pattern +- Uses standard health check caching +- Participates in inventory aggregation +- Supports group linking across sources + +**Data Model Compatibility**: + +- Uses existing `Node`, `Facts`, `NodeGroup` types +- Follows existing node ID format: `{source}:{identifier}` +- Compatible with existing UI components + +### Future Extensibility + +**Planned Enhancements**: + +1. Support for Proxmox Backup Server integration +2. Storage management capabilities +3. Network configuration management +4. Snapshot management +5. Template management +6. Cluster management operations + +**Extension Points**: + +- Additional provisioning capabilities can be added to `listProvisioningCapabilities()` +- New action types can be added to `executeAction()` +- Additional data types can be supported in `getNodeData()` + +## References + +- [Proxmox VE API Documentation](https://pve.proxmox.com/pve-docs/api-viewer/) +- [Proxmox VE API Wiki](https://pve.proxmox.com/wiki/Proxmox_VE_API) +- [PuppetDB Integration](pabawi/backend/src/integrations/puppetdb/) - Reference implementation +- [BasePlugin](pabawi/backend/src/integrations/BasePlugin.ts) - Plugin base class +- [Integration Types](pabawi/backend/src/integrations/types.ts) - Core interfaces diff --git a/.kiro/specs/090/proxmox-integration/requirements.md b/.kiro/specs/090/proxmox-integration/requirements.md new file mode 100644 index 00000000..2e9bf7cb --- /dev/null +++ b/.kiro/specs/090/proxmox-integration/requirements.md @@ -0,0 +1,302 @@ +# Requirements Document + +## Introduction + +This document specifies requirements for integrating Proxmox Virtual Environment (VE) into Pabawi. 
Proxmox VE is an open-source virtualization management platform that provides a REST API for managing virtual machines (VMs) and Linux containers (LXC). This integration introduces a new "provisioning" capability type to the system, enabling VM and container lifecycle management alongside existing inventory, facts, and action capabilities. + +The integration follows Pabawi's existing plugin architecture pattern used by PuppetDB, Bolt, Ansible, SSH, Hiera, and Puppetserver integrations. + +## Glossary + +- **Proxmox_Integration**: The plugin component that interfaces with Proxmox VE API +- **Proxmox_Service**: The service layer that handles API communication and data transformation +- **Proxmox_Client**: The HTTP client that executes REST API calls to Proxmox VE +- **VM**: Virtual Machine managed by Proxmox VE +- **LXC**: Linux Container managed by Proxmox VE +- **Guest**: Either a VM or LXC container +- **Node**: A physical Proxmox server in the cluster +- **Cluster**: A group of Proxmox nodes working together +- **VMID**: Unique numeric identifier for a guest (VM or LXC) +- **Integration_Manager**: The system component that orchestrates multiple integration plugins +- **Provisioning_Capability**: A new capability type for creating and destroying infrastructure resources +- **Inventory_Capability**: Capability to discover and list managed resources +- **Facts_Capability**: Capability to retrieve detailed information about specific resources +- **Action_Capability**: Capability to perform operations on existing resources + +## Requirements + +### Requirement 1: Plugin Architecture Compliance + +**User Story:** As a system architect, I want the Proxmox integration to follow the existing plugin architecture, so that it integrates seamlessly with other plugins. + +#### Acceptance Criteria + +1. THE Proxmox_Integration SHALL extend the BasePlugin class +2. THE Proxmox_Integration SHALL implement the InformationSourcePlugin interface +3. 
THE Proxmox_Integration SHALL implement the ExecutionToolPlugin interface +4. THE Proxmox_Integration SHALL register with the Integration_Manager during initialization +5. THE Proxmox_Integration SHALL provide a configuration schema matching the IntegrationConfig type +6. THE Proxmox_Integration SHALL use LoggerService for all logging operations +7. THE Proxmox_Integration SHALL use PerformanceMonitorService for performance tracking + +### Requirement 2: Configuration Management + +**User Story:** As a system administrator, I want to configure Proxmox connection settings, so that the integration can connect to my Proxmox cluster. + +#### Acceptance Criteria + +1. THE Proxmox_Integration SHALL accept a configuration object containing host, port, username, password, and realm fields +2. WHERE token authentication is configured, THE Proxmox_Integration SHALL use API token authentication instead of password authentication +3. THE Proxmox_Integration SHALL validate required configuration fields during initialization +4. WHEN invalid configuration is provided, THE Proxmox_Integration SHALL throw a descriptive error +5. THE Proxmox_Integration SHALL support TLS certificate verification configuration +6. WHERE certificate verification is disabled, THE Proxmox_Integration SHALL log a security warning + +### Requirement 3: Authentication and Connection + +**User Story:** As a system administrator, I want the integration to authenticate with Proxmox securely, so that API operations are authorized. + +#### Acceptance Criteria + +1. WHEN initialized, THE Proxmox_Client SHALL authenticate with the Proxmox API using provided credentials +2. THE Proxmox_Client SHALL store the authentication ticket for subsequent API calls +3. WHEN the authentication ticket expires, THE Proxmox_Client SHALL automatically re-authenticate +4. THE Proxmox_Client SHALL support both password-based and token-based authentication +5. 
WHEN authentication fails, THE Proxmox_Client SHALL return a descriptive error message +6. THE Proxmox_Client SHALL use HTTPS for all API communications + +### Requirement 4: Health Check + +**User Story:** As a system operator, I want to monitor the health of the Proxmox integration, so that I can detect connectivity issues. + +#### Acceptance Criteria + +1. THE Proxmox_Integration SHALL implement the performHealthCheck method +2. WHEN performHealthCheck is called, THE Proxmox_Service SHALL query the Proxmox API version endpoint +3. WHEN the API responds successfully, THE Proxmox_Integration SHALL return a healthy status +4. WHEN the API is unreachable, THE Proxmox_Integration SHALL return an unhealthy status with error details +5. WHEN authentication fails, THE Proxmox_Integration SHALL return a degraded status indicating authentication issues +6. THE Proxmox_Integration SHALL cache health check results for 30 seconds to prevent excessive API calls + +### Requirement 5: Inventory Discovery + +**User Story:** As a system operator, I want to discover all VMs and containers in Proxmox, so that I can manage them through Pabawi. + +#### Acceptance Criteria + +1. THE Proxmox_Service SHALL implement the getInventory method +2. WHEN getInventory is called, THE Proxmox_Service SHALL query all guests across all cluster nodes +3. THE Proxmox_Service SHALL transform each guest into a Node object with standardized fields +4. THE Proxmox_Service SHALL include VMID, name, status, node, and type in each Node object +5. THE Proxmox_Service SHALL distinguish between VMs and LXC containers using a type field +6. THE Proxmox_Service SHALL include IP addresses when available in the guest configuration +7. WHEN a guest has no IP address, THE Proxmox_Service SHALL omit the IP field rather than using null + +### Requirement 6: Group Management + +**User Story:** As a system operator, I want to organize guests by node, status, and type, so that I can manage groups of similar resources. 
+ +#### Acceptance Criteria + +1. THE Proxmox_Service SHALL implement the getGroups method +2. THE Proxmox_Service SHALL create NodeGroup objects for each Proxmox node containing its guests +3. THE Proxmox_Service SHALL create NodeGroup objects for each status type containing guests with that status +4. THE Proxmox_Service SHALL create NodeGroup objects for VM and LXC types +5. THE Proxmox_Service SHALL use the format "proxmox:node:{nodename}" for node-based group IDs +6. THE Proxmox_Service SHALL use the format "proxmox:status:{status}" for status-based group IDs +7. THE Proxmox_Service SHALL use the format "proxmox:type:{type}" for type-based group IDs + +### Requirement 7: Facts Retrieval + +**User Story:** As a system operator, I want to retrieve detailed information about a specific guest, so that I can understand its configuration and state. + +#### Acceptance Criteria + +1. THE Proxmox_Service SHALL implement the getNodeFacts method +2. WHEN getNodeFacts is called with a VMID, THE Proxmox_Service SHALL query the guest configuration +3. THE Proxmox_Service SHALL query the guest status information +4. THE Proxmox_Service SHALL transform the configuration and status into a Facts object +5. THE Proxmox_Service SHALL include CPU, memory, disk, and network configuration in the Facts object +6. THE Proxmox_Service SHALL include current resource usage when the guest is running +7. WHEN the guest does not exist, THE Proxmox_Service SHALL throw a descriptive error + +### Requirement 8: VM Action Capabilities + +**User Story:** As a system operator, I want to start, stop, and pause VMs and containers, so that I can manage their lifecycle. + +#### Acceptance Criteria + +1. THE Proxmox_Integration SHALL implement the executeAction method +2. THE Proxmox_Integration SHALL support "start", "stop", "shutdown", "reboot", "suspend", and "resume" action types +3. WHEN executeAction is called with a start action, THE Proxmox_Service SHALL call the Proxmox start API endpoint +4. 
WHEN executeAction is called with a stop action, THE Proxmox_Service SHALL call the Proxmox stop API endpoint +5. WHEN executeAction is called with a shutdown action, THE Proxmox_Service SHALL call the Proxmox shutdown API endpoint +6. WHEN executeAction is called with a reboot action, THE Proxmox_Service SHALL call the Proxmox reboot API endpoint +7. WHEN executeAction is called with a suspend action, THE Proxmox_Service SHALL call the Proxmox suspend API endpoint +8. WHEN executeAction is called with a resume action, THE Proxmox_Service SHALL call the Proxmox resume API endpoint +9. THE Proxmox_Service SHALL wait for the action to complete before returning the result +10. WHEN an action fails, THE Proxmox_Service SHALL return an ExecutionResult with error details + +### Requirement 9: Provisioning Capability Type + +**User Story:** As a system architect, I want to define a new provisioning capability type, so that VM creation and destruction can be distinguished from other actions. + +#### Acceptance Criteria + +1. THE system SHALL define a ProvisioningCapability interface extending Capability +2. THE ProvisioningCapability interface SHALL include create and destroy operation types +3. THE Proxmox_Integration SHALL implement a listProvisioningCapabilities method +4. THE Proxmox_Integration SHALL return provisioning capabilities including "create_vm", "create_lxc", "destroy_vm", and "destroy_lxc" +5. THE Integration_Manager SHALL support querying plugins for provisioning capabilities +6. THE Integration_Manager SHALL aggregate provisioning capabilities from all plugins + +### Requirement 10: VM Creation + +**User Story:** As a system operator, I want to create new VMs through the Proxmox integration, so that I can provision infrastructure programmatically. + +#### Acceptance Criteria + +1. THE Proxmox_Service SHALL implement a createVM method +2. 
THE createVM method SHALL accept parameters for VMID, name, node, CPU cores, memory, disk size, and network configuration +3. WHEN createVM is called, THE Proxmox_Service SHALL call the Proxmox VM creation API endpoint +4. THE Proxmox_Service SHALL validate that the VMID is unique before creation +5. THE Proxmox_Service SHALL wait for the VM creation task to complete +6. WHEN VM creation succeeds, THE Proxmox_Service SHALL return the VMID and status +7. WHEN VM creation fails, THE Proxmox_Service SHALL return a descriptive error message + +### Requirement 11: LXC Container Creation + +**User Story:** As a system operator, I want to create new LXC containers through the Proxmox integration, so that I can provision lightweight containers. + +#### Acceptance Criteria + +1. THE Proxmox_Service SHALL implement a createLXC method +2. THE createLXC method SHALL accept parameters for VMID, name, node, CPU cores, memory, disk size, template, and network configuration +3. WHEN createLXC is called, THE Proxmox_Service SHALL call the Proxmox LXC creation API endpoint +4. THE Proxmox_Service SHALL validate that the VMID is unique before creation +5. THE Proxmox_Service SHALL wait for the LXC creation task to complete +6. WHEN LXC creation succeeds, THE Proxmox_Service SHALL return the VMID and status +7. WHEN LXC creation fails, THE Proxmox_Service SHALL return a descriptive error message + +### Requirement 12: Guest Destruction + +**User Story:** As a system operator, I want to destroy VMs and containers through the Proxmox integration, so that I can deprovision resources. + +#### Acceptance Criteria + +1. THE Proxmox_Service SHALL implement a destroyGuest method +2. WHEN destroyGuest is called with a VMID, THE Proxmox_Service SHALL verify the guest exists +3. WHEN the guest is running, THE Proxmox_Service SHALL stop it before destruction +4. THE Proxmox_Service SHALL call the Proxmox deletion API endpoint +5. 
THE Proxmox_Service SHALL wait for the deletion task to complete +6. WHEN destruction succeeds, THE Proxmox_Service SHALL return a success status +7. WHEN the guest does not exist, THE Proxmox_Service SHALL return an error indicating the guest was not found + +### Requirement 13: Task Status Monitoring + +**User Story:** As a system operator, I want to monitor the status of long-running Proxmox tasks, so that I know when operations complete. + +#### Acceptance Criteria + +1. THE Proxmox_Client SHALL implement a waitForTask method +2. WHEN waitForTask is called with a task ID, THE Proxmox_Client SHALL poll the task status endpoint +3. THE Proxmox_Client SHALL poll every 2 seconds until the task completes or fails +4. WHEN the task completes successfully, THE Proxmox_Client SHALL return a success status +5. WHEN the task fails, THE Proxmox_Client SHALL return the error message from the task +6. THE Proxmox_Client SHALL timeout after 300 seconds and return a timeout error +7. WHERE a custom timeout is provided, THE Proxmox_Client SHALL use the custom timeout value + +### Requirement 14: Error Handling + +**User Story:** As a developer, I want comprehensive error handling, so that failures are reported clearly and the system remains stable. + +#### Acceptance Criteria + +1. THE Proxmox_Client SHALL catch HTTP errors and transform them into descriptive error messages +2. WHEN a 401 error occurs, THE Proxmox_Client SHALL indicate an authentication failure +3. WHEN a 403 error occurs, THE Proxmox_Client SHALL indicate a permission denial +4. WHEN a 404 error occurs, THE Proxmox_Client SHALL indicate the resource was not found +5. WHEN a 500 error occurs, THE Proxmox_Client SHALL indicate a server error with details +6. WHEN a network error occurs, THE Proxmox_Client SHALL indicate a connectivity failure +7. 
THE Proxmox_Service SHALL log all errors using LoggerService with appropriate context + +### Requirement 15: API Client Resilience + +**User Story:** As a system operator, I want the integration to handle transient failures gracefully, so that temporary network issues do not cause permanent failures. + +#### Acceptance Criteria + +1. THE Proxmox_Client SHALL implement retry logic for transient failures +2. THE Proxmox_Client SHALL retry failed requests up to 3 times with exponential backoff +3. THE Proxmox_Client SHALL not retry authentication failures +4. THE Proxmox_Client SHALL not retry 4xx client errors except 429 rate limit errors +5. WHEN a 429 error occurs, THE Proxmox_Client SHALL wait for the retry-after duration before retrying +6. THE Proxmox_Client SHALL log retry attempts with the attempt number and reason + +### Requirement 16: Configuration Validation + +**User Story:** As a system administrator, I want configuration errors to be detected early, so that I can fix them before operations fail. + +#### Acceptance Criteria + +1. THE Proxmox_Integration SHALL validate the host field is a valid hostname or IP address +2. THE Proxmox_Integration SHALL validate the port field is a number between 1 and 65535 +3. THE Proxmox_Integration SHALL validate that either password or token authentication is configured +4. WHEN both password and token are provided, THE Proxmox_Integration SHALL prefer token authentication +5. THE Proxmox_Integration SHALL validate the realm field is not empty when using password authentication +6. WHEN validation fails, THE Proxmox_Integration SHALL throw an error with specific field information + +### Requirement 17: Type Safety + +**User Story:** As a developer, I want strong TypeScript types for all Proxmox data structures, so that I can catch errors at compile time. + +#### Acceptance Criteria + +1. THE Proxmox_Service SHALL define TypeScript interfaces for all Proxmox API response types +2. 
THE Proxmox_Service SHALL define TypeScript interfaces for VM configuration +3. THE Proxmox_Service SHALL define TypeScript interfaces for LXC configuration +4. THE Proxmox_Service SHALL define TypeScript interfaces for guest status +5. THE Proxmox_Service SHALL define TypeScript interfaces for task status +6. THE Proxmox_Service SHALL use type guards to validate API responses at runtime + +### Requirement 18: Documentation + +**User Story:** As a system administrator, I want comprehensive documentation for the Proxmox integration, so that I can configure and use it effectively. + +#### Acceptance Criteria + +1. THE integration SHALL include a markdown documentation file in docs/integrations/proxmox.md +2. THE documentation SHALL describe all configuration options with examples +3. THE documentation SHALL document all supported actions with parameter descriptions +4. THE documentation SHALL document all provisioning capabilities with examples +5. THE documentation SHALL include authentication setup instructions for both password and token methods +6. THE documentation SHALL include troubleshooting guidance for common issues +7. THE documentation SHALL include example configuration snippets + +### Requirement 19: Testing Requirements + +**User Story:** As a developer, I want comprehensive tests for the Proxmox integration, so that I can verify correctness and prevent regressions. + +#### Acceptance Criteria + +1. THE integration SHALL include unit tests for the Proxmox_Service class +2. THE integration SHALL include unit tests for the Proxmox_Client class +3. THE integration SHALL include unit tests for the Proxmox_Integration plugin class +4. THE integration SHALL mock Proxmox API responses in unit tests +5. THE integration SHALL test error handling for all API failure scenarios +6. THE integration SHALL test authentication token refresh logic +7. 
THE integration SHALL achieve at least 80% code coverage + +### Requirement 20: Performance Considerations + +**User Story:** As a system operator, I want the integration to perform efficiently, so that it does not slow down the system. + +#### Acceptance Criteria + +1. THE Proxmox_Service SHALL cache inventory results for 60 seconds +2. THE Proxmox_Service SHALL cache group results for 60 seconds +3. THE Proxmox_Service SHALL cache facts results for 30 seconds +4. THE Proxmox_Service SHALL provide a method to clear the cache manually +5. THE Proxmox_Service SHALL use PerformanceMonitorService to track API call durations +6. THE Proxmox_Client SHALL reuse HTTP connections for multiple requests +7. THE Proxmox_Service SHALL execute parallel API calls when fetching data for multiple guests diff --git a/.kiro/specs/090/proxmox-integration/tasks.md b/.kiro/specs/090/proxmox-integration/tasks.md new file mode 100644 index 00000000..27603101 --- /dev/null +++ b/.kiro/specs/090/proxmox-integration/tasks.md @@ -0,0 +1,413 @@ +# Implementation Plan: Proxmox Integration + +## Overview + +This implementation plan creates a new Proxmox Virtual Environment integration for Pabawi following the established plugin architecture. The integration enables VM and container lifecycle management, inventory discovery, and introduces a new "provisioning" capability type to the system. + +The implementation follows the existing patterns from PuppetDB, Bolt, and SSH integrations, using TypeScript with comprehensive error handling, caching, and retry logic. + +## Tasks + +- [x] 1. 
Set up project structure and type definitions + - Create `pabawi/backend/src/integrations/proxmox/` directory + - Create `types.ts` with all Proxmox-specific interfaces (ProxmoxConfig, ProxmoxGuest, ProxmoxGuestConfig, ProxmoxGuestStatus, VMCreateParams, LXCCreateParams, ProxmoxTaskStatus, RetryConfig, error classes) + - Update `pabawi/backend/src/integrations/types.ts` to add ProvisioningCapability interface + - _Requirements: 17.1, 17.2, 17.3, 17.4, 17.5, 9.1, 9.2_ + +- [x] 2. Implement ProxmoxClient HTTP layer + - [x] 2.1 Create ProxmoxClient class with authentication + - Implement constructor with config and logger + - Implement authenticate() method for both password and token authentication + - Implement ticket storage and HTTPS agent configuration + - _Requirements: 3.1, 3.2, 3.4, 3.6, 16.4_ + + - [x] 2.2 Implement HTTP request methods + - Implement get(), post(), delete() methods + - Implement request() method with authentication headers + - Implement handleResponse() with HTTP error transformation + - Implement automatic ticket refresh on 401 errors + - _Requirements: 3.3, 14.2, 14.3, 14.4, 14.5, 14.6_ + + - [x] 2.3 Implement retry logic with exponential backoff + - Implement requestWithRetry() method + - Configure retry for transient failures (ECONNRESET, ETIMEDOUT, ENOTFOUND) + - Implement exponential backoff calculation + - Handle 429 rate limiting with Retry-After header + - Skip retry for authentication and 4xx errors + - _Requirements: 15.1, 15.2, 15.3, 15.4, 15.5, 15.6_ + + - [x] 2.4 Implement task polling mechanism + - Implement waitForTask() method with configurable timeout + - Poll task status endpoint every 2 seconds + - Handle task completion (success/failure) + - Implement timeout after 300 seconds (default) + - _Requirements: 13.1, 13.2, 13.3, 13.4, 13.5, 13.6, 13.7_ + +- [x] 3. Checkpoint - Ensure ProxmoxClient tests pass + - Ensure all tests pass, ask the user if questions arise. + +- [x] 4. 
Implement ProxmoxService business logic layer + - [x] 4.1 Create ProxmoxService class with initialization + - Implement constructor with config, logger, and performanceMonitor + - Implement initialize() method to create ProxmoxClient + - Implement healthCheck() method querying version endpoint + - Initialize SimpleCache with 60s TTL + - _Requirements: 1.6, 1.7, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6_ + + - [x] 4.2 Implement inventory discovery + - Implement getInventory() method with caching + - Query cluster resources endpoint for all VMs and containers + - Implement transformGuestToNode() helper method + - Cache results for 60 seconds + - Use PerformanceMonitorService to track duration + - _Requirements: 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 20.1, 20.5_ + + - [x] 4.3 Implement group management + - Implement getGroups() method with caching + - Implement groupByNode() helper to create node-based groups + - Implement groupByStatus() helper to create status-based groups + - Implement groupByType() helper to create type-based groups + - Use correct group ID formats (proxmox:node:{name}, proxmox:status:{status}, proxmox:type:{type}) + - Cache results for 60 seconds + - _Requirements: 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 20.2_ + + - [x] 4.4 Implement facts retrieval + - Implement getNodeFacts() method with caching + - Parse VMID and node name from nodeId + - Implement getGuestType() helper to determine qemu vs lxc + - Query guest config and status endpoints + - Implement transformToFacts() helper method + - Include CPU, memory, disk, network config and current usage + - Handle non-existent guests with descriptive errors + - Cache results for 30 seconds + - _Requirements: 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 20.3_ + +- [x] 5. 
Implement lifecycle action capabilities + - [x] 5.1 Implement executeAction() dispatcher + - Implement executeAction() method to route actions + - Implement executeLifecycleAction() for start/stop/shutdown/reboot/suspend/resume + - Parse target nodeId to extract node and VMID + - Determine guest type and call appropriate endpoint + - Wait for action task completion + - Return ExecutionResult with success/error details + - _Requirements: 8.1, 8.2, 8.3, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9, 8.10_ + + - [x] 5.2 Implement capability listing + - Implement listCapabilities() method + - Return array of Capability objects for all lifecycle actions + - Include name, description, and parameters for each capability + - _Requirements: 8.1, 8.2_ + +- [x] 6. Implement provisioning capabilities + - [x] 6.1 Implement VM creation + - Implement createVM() method + - Implement guestExists() helper to check VMID uniqueness + - Validate VMID is unique before creation + - Call Proxmox VM creation endpoint + - Wait for creation task completion + - Clear inventory and groups cache after creation + - Return ExecutionResult with VMID and status + - _Requirements: 10.1, 10.2, 10.3, 10.4, 10.5, 10.6, 10.7_ + + - [x] 6.2 Implement LXC container creation + - Implement createLXC() method + - Validate VMID is unique before creation + - Call Proxmox LXC creation endpoint + - Wait for creation task completion + - Clear inventory and groups cache after creation + - Return ExecutionResult with VMID and status + - _Requirements: 11.1, 11.2, 11.3, 11.4, 11.5, 11.6, 11.7_ + + - [x] 6.3 Implement guest destruction + - Implement destroyGuest() method + - Verify guest exists before destruction + - Stop guest if running before deletion + - Call Proxmox deletion endpoint + - Wait for deletion task completion + - Clear all related caches (inventory, groups, facts) + - Return success status or error + - _Requirements: 12.1, 12.2, 12.3, 12.4, 12.5, 12.6, 12.7_ + + - [x] 6.4 Implement provisioning action dispatcher + - 
Implement executeProvisioningAction() method + - Route create_vm, create_lxc, destroy_vm, destroy_lxc actions + - Validate parameters for each action type + - Call appropriate service method + - _Requirements: 9.3, 9.4_ + + - [x] 6.5 Implement provisioning capability listing + - Implement listProvisioningCapabilities() method + - Return ProvisioningCapability objects for create_vm, create_lxc, destroy_vm, destroy_lxc + - Include operation type (create/destroy) and parameters + - _Requirements: 9.3, 9.4, 9.5_ + +- [x] 7. Checkpoint - Ensure ProxmoxService tests pass + - Ensure all tests pass, ask the user if questions arise. + +- [x] 8. Implement ProxmoxIntegration plugin class + - [x] 8.1 Create ProxmoxIntegration class extending BasePlugin + - Extend BasePlugin with "both" type + - Implement InformationSourcePlugin interface + - Implement ExecutionToolPlugin interface + - Implement constructor with logger and performanceMonitor + - _Requirements: 1.1, 1.2, 1.3_ + + - [x] 8.2 Implement plugin initialization and validation + - Implement performInitialization() method + - Implement validateProxmoxConfig() method + - Validate host, port, authentication, and realm + - Log security warning if SSL verification disabled + - Initialize ProxmoxService with validated config + - _Requirements: 1.4, 1.5, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 16.1, 16.2, 16.3, 16.4, 16.5, 16.6_ + + - [x] 8.3 Implement plugin interface methods + - Implement performHealthCheck() delegating to service + - Implement getInventory() delegating to service + - Implement getGroups() delegating to service + - Implement getNodeFacts() delegating to service + - Implement getNodeData() delegating to service + - Implement executeAction() delegating to service + - Implement listCapabilities() delegating to service + - Implement listProvisioningCapabilities() delegating to service + - _Requirements: 1.4, 4.1_ + +- [x] 9. 
Integrate with IntegrationManager + - [x] 9.1 Register ProxmoxIntegration with IntegrationManager + - Import ProxmoxIntegration in IntegrationManager + - Add proxmox to integration registry + - Ensure plugin participates in inventory aggregation + - _Requirements: 1.4, 9.5, 9.6_ + + - [x] 9.2 Update IntegrationManager for provisioning capabilities + - Add method to query provisioning capabilities from all plugins + - Aggregate provisioning capabilities across plugins + - _Requirements: 9.5, 9.6_ + +- [ ] 10. Write unit tests for ProxmoxClient + - [ ]* 10.1 Write unit tests for authentication + - Test password authentication with ticket storage + - Test token authentication + - Test authentication failure handling + - Test automatic ticket refresh on 401 + - Mock fetch responses + - _Requirements: 3.1, 3.2, 3.3, 3.4, 3.5_ + + - [ ]* 10.2 Write unit tests for HTTP methods + - Test get(), post(), delete() methods + - Test request header construction + - Test response parsing + - Mock Proxmox API responses + - _Requirements: 3.6_ + + - [ ]* 10.3 Write unit tests for error handling + - Test 401/403 authentication errors + - Test 404 not found errors + - Test 429 rate limiting with retry + - Test 5xx server errors + - Test network errors (ECONNRESET, ETIMEDOUT) + - _Requirements: 14.1, 14.2, 14.3, 14.4, 14.5, 14.6_ + + - [ ]* 10.4 Write unit tests for retry logic + - Test retry with exponential backoff + - Test max retry attempts + - Test non-retryable errors (auth, 4xx) + - Test retryable errors (network, 5xx) + - Test retry logging + - _Requirements: 15.1, 15.2, 15.3, 15.4, 15.5, 15.6_ + + - [ ]* 10.5 Write unit tests for task polling + - Test waitForTask() success case + - Test waitForTask() failure case + - Test task timeout + - Test custom timeout values + - Test polling interval + - _Requirements: 13.1, 13.2, 13.3, 13.4, 13.5, 13.6, 13.7_ + +- [ ] 11. 
Write unit tests for ProxmoxService + - [ ]* 11.1 Write unit tests for initialization and health check + - Test service initialization + - Test health check with successful API response + - Test health check with authentication failure (degraded) + - Test health check with connection failure (unhealthy) + - Test health check caching + - Mock ProxmoxClient + - _Requirements: 4.1, 4.2, 4.3, 4.4, 4.5, 4.6_ + + - [ ]* 11.2 Write unit tests for inventory discovery + - Test getInventory() with valid API response + - Test guest-to-node transformation + - Test inventory caching (60s TTL) + - Test cache hit vs cache miss + - Test empty inventory + - Mock cluster resources endpoint + - _Requirements: 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 20.1_ + + - [ ]* 11.3 Write unit tests for group management + - Test getGroups() with multiple nodes + - Test groupByNode() creates correct groups + - Test groupByStatus() creates correct groups + - Test groupByType() creates correct groups + - Test group ID format correctness + - Test groups caching (60s TTL) + - _Requirements: 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 20.2_ + + - [ ]* 11.4 Write unit tests for facts retrieval + - Test getNodeFacts() for VM + - Test getNodeFacts() for LXC + - Test facts transformation with config and status + - Test facts caching (30s TTL) + - Test non-existent guest error + - Test running vs stopped guest facts + - _Requirements: 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 20.3_ + + - [ ]* 11.5 Write unit tests for lifecycle actions + - Test executeAction() for start action + - Test executeAction() for stop action + - Test executeAction() for shutdown action + - Test executeAction() for reboot action + - Test executeAction() for suspend action + - Test executeAction() for resume action + - Test action failure with error details + - Test task completion waiting + - _Requirements: 8.1, 8.2, 8.3, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9, 8.10_ + + - [ ]* 11.6 Write unit tests for VM creation + - Test createVM() success case + - Test VMID 
uniqueness validation + - Test VM creation failure + - Test cache clearing after creation + - Mock VM creation endpoint + - _Requirements: 10.1, 10.2, 10.3, 10.4, 10.5, 10.6, 10.7_ + + - [ ]* 11.7 Write unit tests for LXC creation + - Test createLXC() success case + - Test VMID uniqueness validation + - Test LXC creation failure + - Test cache clearing after creation + - Mock LXC creation endpoint + - _Requirements: 11.1, 11.2, 11.3, 11.4, 11.5, 11.6, 11.7_ + + - [ ]* 11.8 Write unit tests for guest destruction + - Test destroyGuest() success case + - Test non-existent guest error + - Test stop-before-delete for running guest + - Test cache clearing after destruction + - Mock deletion endpoint + - _Requirements: 12.1, 12.2, 12.3, 12.4, 12.5, 12.6, 12.7_ + + - [ ]* 11.9 Write unit tests for capability listing + - Test listCapabilities() returns all lifecycle actions + - Test listProvisioningCapabilities() returns all provisioning actions + - Test capability parameter definitions + - _Requirements: 8.1, 8.2, 9.3, 9.4_ + +- [ ] 12. Write unit tests for ProxmoxIntegration + - [ ]* 12.1 Write unit tests for plugin initialization + - Test plugin initialization with valid config + - Test config validation for missing host + - Test config validation for invalid port + - Test config validation for missing authentication + - Test config validation for missing realm + - Test SSL verification warning + - _Requirements: 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 16.1, 16.2, 16.3, 16.4, 16.5, 16.6_ + + - [ ]* 12.2 Write unit tests for plugin interface methods + - Test performHealthCheck() delegation + - Test getInventory() delegation + - Test getGroups() delegation + - Test getNodeFacts() delegation + - Test executeAction() delegation + - Test listCapabilities() delegation + - Test listProvisioningCapabilities() delegation + - Mock ProxmoxService + - _Requirements: 1.4, 4.1_ + +- [ ] 13. 
Write property-based tests + - [ ]* 13.1 Write property test for configuration validation + - **Property 1: Configuration Validation Rejects Invalid Inputs** + - **Validates: Requirements 2.3, 2.4, 16.1, 16.2, 16.3, 16.5, 16.6** + - Generate invalid configs (missing fields, invalid ports, invalid hosts) + - Verify initialization throws descriptive errors + - Use fast-check with 100 iterations + + - [ ]* 13.2 Write property test for guest-to-node transformation + - **Property 4: Guest-to-Node Transformation Completeness** + - **Validates: Requirements 5.3, 5.4, 5.5, 5.6, 5.7** + - Generate random Proxmox guest objects + - Verify transformed Node has all required fields + - Verify type field correctness + - Verify IP field handling (present or omitted, never null) + - Use fast-check with 100 iterations + + - [ ]* 13.3 Write property test for group ID format + - **Property 8: Group ID Format Correctness** + - **Validates: Requirements 6.5, 6.6, 6.7** + - Generate random node names, statuses, and types + - Verify group IDs match expected format + - Use fast-check with 100 iterations + + - [ ]* 13.4 Write property test for caching behavior + - **Property 22, 23, 24: Cache Behavior** + - **Validates: Requirements 20.1, 20.2, 20.3** + - Test inventory cache TTL (60s) + - Test groups cache TTL (60s) + - Test facts cache TTL (30s) + - Verify cache hits don't trigger API calls + - Use fast-check with 100 iterations + + - [ ]* 13.5 Write property test for retry logic + - **Property 18: Retry Logic for Transient Failures** + - **Validates: Requirements 15.2** + - Generate transient network errors + - Verify retry attempts with exponential backoff + - Verify max retry limit + - Use fast-check with 100 iterations + +- [x] 14. 
Create API routes for Proxmox endpoints + - [x] 14.1 Create provisioning API routes + - Add POST /api/integrations/proxmox/provision/vm endpoint + - Add POST /api/integrations/proxmox/provision/lxc endpoint + - Add DELETE /api/integrations/proxmox/provision/:vmid endpoint + - Validate request parameters + - Call ProxmoxIntegration methods + - Return appropriate HTTP status codes + + - [x] 14.2 Create action API routes + - Add POST /api/integrations/proxmox/action endpoint + - Support all lifecycle actions (start, stop, shutdown, reboot, suspend, resume) + - Validate action parameters + - Call ProxmoxIntegration executeAction method + +- [x] 15. Write documentation + - [x] 15.1 Create integration documentation + - Create `docs/integrations/proxmox.md` + - Document all configuration options with examples + - Document authentication setup (password and token) + - Document all supported actions with parameters + - Document all provisioning capabilities with examples + - Include troubleshooting section + - Include example configuration snippets + - _Requirements: 18.1, 18.2, 18.3, 18.4, 18.5, 18.6, 18.7_ + + - [x] 15.2 Create configuration examples + - Document environment variable setup + - Provide example .env configuration + - Document Proxmox API token creation steps + - Document required permissions + - _Requirements: 18.5_ + +- [x] 16. Final checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. 
+ +## Notes + +- Tasks marked with `*` are optional and can be skipped for faster MVP +- Each task references specific requirements for traceability +- Checkpoints ensure incremental validation +- Property tests validate universal correctness properties +- Unit tests validate specific examples and edge cases +- The implementation follows existing patterns from PuppetDB and SSH integrations +- TypeScript is used throughout for type safety +- All API communication uses HTTPS +- Caching improves performance and reduces API load +- Retry logic handles transient failures gracefully diff --git a/.kiro/specs/090/puppet-pabawi-refactoring/.config.kiro b/.kiro/specs/090/puppet-pabawi-refactoring/.config.kiro new file mode 100644 index 00000000..e2f97a05 --- /dev/null +++ b/.kiro/specs/090/puppet-pabawi-refactoring/.config.kiro @@ -0,0 +1 @@ +{"specId": "d5a7de16-585a-4236-9911-1cb9ca3b2ffa", "workflowType": "requirements-first", "specType": "feature"} diff --git a/.kiro/specs/090/puppet-pabawi-refactoring/design.md b/.kiro/specs/090/puppet-pabawi-refactoring/design.md new file mode 100644 index 00000000..5ba92fe4 --- /dev/null +++ b/.kiro/specs/090/puppet-pabawi-refactoring/design.md @@ -0,0 +1,621 @@ +# Design Document: Puppet-Pabawi Refactoring + +## Overview + +This design document specifies the refactoring of the puppet-pabawi module to introduce a consistent settings hash pattern across all integration classes, add SSH integration support, and properly scope command whitelisting parameters. The refactoring improves configuration flexibility by clearly separating Pabawi application configuration (written to .env file) from Puppet infrastructure management (package installation, file deployment, git repository cloning). + +### Goals + +1. Introduce a consistent `settings` hash parameter pattern across all integration classes +2. Add SSH integration support for remote command execution +3. 
Move command whitelist parameters from bolt.pp to docker.pp and nginx.pp where they are actually used +4. Maintain backward compatibility where possible through parameter defaults +5. Improve code maintainability and configuration clarity + +### Non-Goals + +1. Changing the underlying Pabawi application behavior +2. Modifying the .env file format or structure +3. Altering existing concat fragment ordering +4. Changing how the main pabawi class orchestrates integrations + +## Architecture + +### Settings Hash Pattern + +The refactoring introduces a two-tier parameter structure: + +**Settings Hash (Application Configuration)** + +- Contains key-value pairs written to the .env file +- Flexible schema - accepts any keys the Pabawi application understands +- Values are transformed based on type (Arrays → JSON, Booleans → lowercase strings, etc.) +- Prefixed with integration name when written to .env (e.g., `ANSIBLE_`, `BOLT_`) + +**Regular Class Parameters (Puppet Management)** + +- Handle infrastructure concerns: package installation, file deployment, git cloning +- Examples: `manage_package`, `inventory_source`, `ssl_ca_source` +- Not written to .env file - used by Puppet resources + +### Component Relationships + +```mermaid +graph TD + A[Main pabawi Class] --> B[install::docker] + A --> C[proxy::nginx] + A --> D[Integration Classes] + + D --> E[integrations::ansible] + D --> F[integrations::bolt] + D --> G[integrations::hiera] + D --> H[integrations::puppetdb] + D --> I[integrations::puppetserver] + D --> J[integrations::ssh - NEW] + + E --> K[.env File via Concat] + F --> K + G --> K + H --> K + I --> K + J --> K + B --> K + + E --> L[vcsrepo Resources] + F --> L + G --> L + + H --> M[File Resources for SSL] + I --> M + + B --> N[Docker Container] + N --> K + + C --> O[Nginx Config] +``` + +### Environment File Generation + +All integration classes write to `/opt/pabawi/.env` using concat fragments with specific ordering: + +- Order 10: Base configuration 
(docker.pp)
+- Order 20: Bolt integration
+- Order 21: PuppetDB integration
+- Order 22: PuppetServer integration
+- Order 23: Hiera integration
+- Order 24: Ansible integration
+- Order 25: SSH integration (new)
+
+### Command Whitelist Relocation
+
+Command whitelisting moves from bolt.pp to the classes that actually enforce it:
+
+- **docker.pp**: Writes `COMMAND_WHITELIST` and `COMMAND_WHITELIST_ALLOW_ALL` to .env file (used by containerized application)
+- **nginx.pp**: Uses command whitelist in nginx configuration context (for request filtering)
+- **bolt.pp**: No longer manages command whitelist parameters
+
+## Components and Interfaces
+
+### Integration Class Interface Pattern
+
+All integration classes follow this consistent interface (`<name>` is the integration name, e.g. `ansible`, `bolt`, `ssh`):
+
+```puppet
+class pabawi::integrations::<name> (
+  Boolean $enabled = true,
+  Hash $settings = {},
+  Boolean $manage_package = false,
+  Optional[String[1]] $<name>_source = undef,
+  # ... additional source parameters as needed
+) {
+  # Validation
+  # Package management (if manage_package)
+  # Resource deployment (if *_source provided)
+  # Concat fragment for .env file
+}
+```
+
+### New SSH Integration Class
+
+**File**: `puppet-pabawi/manifests/integrations/ssh.pp`
+
+**Parameters**:
+
+- `enabled` (Boolean, default: true) - Enable SSH integration
+- `settings` (Hash, default: {}) - Application configuration for SSH
+
+**Settings Hash Keys** (examples, flexible schema):
+
+- `host` - SSH host to connect to
+- `port` - SSH port (default 22)
+- `username` - SSH username
+- `private_key_path` - Path to SSH private key
+- `timeout` - Connection timeout in milliseconds
+- `known_hosts_path` - Path to known_hosts file
+
+**Behavior**:
+
+- Writes `SSH_ENABLED=true` when enabled
+- Writes all settings hash keys with `SSH_` prefix to .env
+- Uses concat fragment order 25
+
+### Refactored Ansible Integration
+
+**File**: `puppet-pabawi/manifests/integrations/ansible.pp`
+
+**Parameters**:
+
+- `enabled` (Boolean, default: true)
+- 
`settings` (Hash, default: {}) - Application configuration +- `manage_package` (Boolean, default: false) +- `inventory_source` (Optional[String[1]]) - Git URL for inventory +- `playbook_source` (Optional[String[1]]) - Git URL for playbooks + +**Settings Hash Keys**: + +- `inventory_path` - Path where inventory is located +- `playbook_path` - Path where playbooks are located +- `execution_timeout` - Timeout in milliseconds +- `config` - Path to ansible.cfg + +**Behavior**: + +- When `inventory_source` provided: clones to `settings['inventory_path']` +- When `playbook_source` provided: clones to `settings['playbook_path']` +- Writes all settings with `ANSIBLE_` prefix to .env + +### Refactored Bolt Integration + +**File**: `puppet-pabawi/manifests/integrations/bolt.pp` + +**Parameters**: + +- `enabled` (Boolean, default: true) +- `settings` (Hash, default: {}) - Application configuration +- `manage_package` (Boolean, default: false) +- `project_path_source` (Optional[String[1]]) - Git URL for bolt project + +**Settings Hash Keys**: + +- `project_path` - Path to bolt project directory +- `execution_timeout` - Timeout in milliseconds + +**Behavior**: + +- When `project_path_source` provided: clones to `settings['project_path']` +- Writes all settings with `BOLT_` prefix to .env +- **REMOVES**: `command_whitelist` and `command_whitelist_allow_all` parameters + +### Refactored Hiera Integration + +**File**: `puppet-pabawi/manifests/integrations/hiera.pp` + +**Parameters**: + +- `enabled` (Boolean, default: true) +- `settings` (Hash, default: {}) - Application configuration +- `manage_package` (Boolean, default: false) +- `control_repo_source` (Optional[String[1]]) - Git URL for control repo + +**Settings Hash Keys**: + +- `control_repo_path` - Path to control repository +- `config_path` - Relative path to hiera.yaml +- `environments` - Array of environment names +- `fact_source_prefer_puppetdb` - Boolean for fact source preference +- `fact_source_local_path` - Path to 
local fact files + +**Behavior**: + +- When `control_repo_source` provided: clones to `settings['control_repo_path']` +- Writes all settings with `HIERA_` prefix to .env + +### Refactored PuppetDB Integration + +**File**: `puppet-pabawi/manifests/integrations/puppetdb.pp` + +**Parameters**: + +- `enabled` (Boolean, default: true) +- `settings` (Hash, default: {}) - Application configuration +- `ssl_ca_source` (Optional[String[1]]) - Source for CA certificate +- `ssl_cert_source` (Optional[String[1]]) - Source for client certificate +- `ssl_key_source` (Optional[String[1]]) - Source for private key + +**Settings Hash Keys**: + +- `server_url` - PuppetDB server URL +- `port` - PuppetDB port +- `ssl_enabled` - Boolean for SSL usage +- `ssl_ca` - Path to CA certificate +- `ssl_cert` - Path to client certificate +- `ssl_key` - Path to private key +- `ssl_reject_unauthorized` - Boolean for certificate validation + +**Behavior**: + +- When `ssl_*_source` provided: deploys certificates to paths in `settings['ssl_*']` +- Supports file://, https://, and local path formats for sources +- Writes all settings with `PUPPETDB_` prefix to .env + +### Refactored PuppetServer Integration + +**File**: `puppet-pabawi/manifests/integrations/puppetserver.pp` + +**Parameters**: + +- `enabled` (Boolean, default: true) +- `settings` (Hash, default: {}) - Application configuration +- `ssl_ca_source` (Optional[String[1]]) - Source for CA certificate +- `ssl_cert_source` (Optional[String[1]]) - Source for client certificate +- `ssl_key_source` (Optional[String[1]]) - Source for private key + +**Settings Hash Keys**: + +- `server_url` - Puppet Server URL +- `port` - Puppet Server port +- `ssl_enabled` - Boolean for SSL usage +- `ssl_ca` - Path to CA certificate +- `ssl_cert` - Path to client certificate +- `ssl_key` - Path to private key +- `ssl_reject_unauthorized` - Boolean for certificate validation +- `inactivity_threshold` - Node inactivity threshold in seconds +- `cache_ttl` - Cache TTL 
in milliseconds +- `circuit_breaker_threshold` - Failure count before circuit opens +- `circuit_breaker_timeout` - Circuit breaker timeout in milliseconds +- `circuit_breaker_reset_timeout` - Circuit breaker reset timeout in milliseconds + +**Behavior**: + +- When `ssl_*_source` provided: deploys certificates to paths in `settings['ssl_*']` +- Supports file://, https://, and local path formats for sources +- Writes all settings with `PUPPETSERVER_` prefix to .env + +### Updated Docker Class + +**File**: `puppet-pabawi/manifests/install/docker.pp` + +**New Parameters**: + +- `command_whitelist` (Array[String[1]], default: []) - Allowed commands +- `command_whitelist_allow_all` (Boolean, default: false) - Bypass whitelist + +**Behavior**: + +- Writes `COMMAND_WHITELIST` as JSON array to .env +- Writes `COMMAND_WHITELIST_ALLOW_ALL` as boolean to .env +- Maintains existing base configuration fragment (order 10) + +### Updated Nginx Class + +**File**: `puppet-pabawi/manifests/proxy/nginx.pp` + +**New Parameters**: + +- `command_whitelist` (Array[String[1]], default: []) - Allowed commands +- `command_whitelist_allow_all` (Boolean, default: false) - Bypass whitelist + +**Behavior**: + +- Uses command whitelist in nginx configuration template +- Applies whitelist filtering at reverse proxy level + +## Data Models + +### Settings Hash Structure + +The settings hash is flexible and integration-specific. 
Each integration defines which keys it expects: + +```puppet +# Ansible example +$ansible_settings = { + 'inventory_path' => '/opt/pabawi/ansible/inventory', + 'playbook_path' => '/opt/pabawi/ansible/playbooks', + 'execution_timeout' => 300000, + 'config' => '/etc/ansible/ansible.cfg', +} + +# PuppetDB example +$puppetdb_settings = { + 'server_url' => 'https://puppetdb.example.com', + 'port' => 8081, + 'ssl_enabled' => true, + 'ssl_ca' => '/opt/pabawi/certs/ca.pem', + 'ssl_cert' => '/opt/pabawi/certs/client.pem', + 'ssl_key' => '/opt/pabawi/certs/client-key.pem', + 'ssl_reject_unauthorized' => true, +} + +# SSH example +$ssh_settings = { + 'host' => 'remote.example.com', + 'port' => 22, + 'username' => 'automation', + 'private_key_path' => '/opt/pabawi/ssh/id_rsa', + 'timeout' => 30000, +} +``` + +### Environment Variable Transformation Rules + +| Puppet Type | .env Format | Example | +|-------------|-------------|---------| +| String | As-is | `ANSIBLE_CONFIG=/etc/ansible/ansible.cfg` | +| Integer | String representation | `PUPPETDB_PORT=8081` | +| Boolean | Lowercase string | `SSH_ENABLED=true` | +| Array | JSON string | `HIERA_ENVIRONMENTS=["production","development"]` | +| Undef/Empty | 'not-set' | `ANSIBLE_CONFIG=not-set` | + +### Git Repository Source Mapping + +| Source Parameter | Settings Hash Key | Purpose | +|------------------|-------------------|---------| +| `inventory_source` | `inventory_path` | Ansible inventory clone destination | +| `playbook_source` | `playbook_path` | Ansible playbooks clone destination | +| `project_path_source` | `project_path` | Bolt project clone destination | +| `control_repo_source` | `control_repo_path` | Hiera control repo clone destination | + +### SSL Certificate Source Mapping + +| Source Parameter | Settings Hash Key | Purpose | +|------------------|-------------------|---------| +| `ssl_ca_source` | `ssl_ca` | CA certificate deployment destination | +| `ssl_cert_source` | `ssl_cert` | Client certificate deployment 
destination |
+| `ssl_key_source` | `ssl_key` | Private key deployment destination |
+
+## Correctness Properties
+
+*A property is a characteristic or behavior that should hold true across all valid executions of a system — essentially, a formal statement about what the system should do. Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees.*
+
+### Property 1: Settings Hash to Environment Variable Transformation
+
+*For any* integration class and any settings hash key-value pair, when the integration writes to the .env file, the value SHALL be transformed according to its type: Arrays to JSON format, Booleans to lowercase strings (true/false), Strings as-is, Integers to string representation, and undef/empty values to 'not-set'.
+
+**Validates: Requirements 9.1, 9.2, 9.3, 9.4, 9.5, 2.6, 2.7**
+
+### Property 2: Settings Hash Prefix Application
+
+*For any* integration class and any key-value pair in its settings hash, when written to the .env file, the key SHALL be prefixed with the uppercase integration name followed by an underscore (e.g., ANSIBLE_, BOLT_, SSH_, HIERA_, PUPPETDB_, PUPPETSERVER_).
+
+**Validates: Requirements 1.5, 3.9, 4.7, 5.7, 6.9, 7.9**
+
+### Property 3: Git Repository Cloning with Source Parameters
+
+*For any* integration class with a source parameter (inventory_source, playbook_source, project_path_source, control_repo_source) containing a git URL, the class SHALL create a vcsrepo resource that clones to the path specified in the corresponding settings hash key (inventory_path, playbook_path, project_path, control_repo_path), with ensure => present, and SHALL create the parent directory before cloning. 
+ +**Validates: Requirements 3.7, 3.8, 4.6, 5.6, 10.1, 10.2, 10.3, 10.4** + +### Property 4: Git Repository Resource Dependencies + +*For any* vcsrepo resource created by an integration class, the resource SHALL have a require relationship to the exec resource that creates its parent directory. + +**Validates: Requirements 10.5** + +### Property 5: SSL Certificate Deployment + +*For any* integration class (PuppetDB or PuppetServer) with ssl_ca_source, ssl_cert_source, or ssl_key_source parameters provided, the class SHALL deploy the certificate files to the paths specified in the settings hash (ssl_ca, ssl_cert, ssl_key), and SHALL support file://, https://, and local path formats for source parameters. + +**Validates: Requirements 6.7, 6.8, 7.7, 7.8** + +### Property 6: SSL Certificate File Permissions + +*For any* SSL certificate file deployed by an integration class, the file SHALL have mode 0644 for CA and certificate files, and mode 0600 for private key files. + +**Validates: Requirements 6.7, 7.7** (implicit security requirement) + +### Property 7: Settings Validation with Descriptive Errors + +*For any* integration class that is enabled and has required settings missing from the settings hash, the class SHALL fail with an error message that specifies both the required setting key name and the integration class name that generated the error, and this validation SHALL occur before any resources are created. + +**Validates: Requirements 8.1, 8.2, 8.3, 8.4** + +### Property 8: Enabled Integration Environment Variable + +*For any* integration class where the enabled parameter is true, the class SHALL write an environment variable {INTEGRATION}_ENABLED=true to the .env file (e.g., SSH_ENABLED=true, ANSIBLE_ENABLED=true). 
+ +**Validates: Requirements 1.4** + +### Property 9: Concat Fragment Ordering Consistency + +*For any* integration class, the concat fragment used to write to the .env file SHALL use the assigned order number: Bolt (20), PuppetDB (21), PuppetServer (22), Hiera (23), Ansible (24), SSH (25). + +**Validates: Requirements 1.6, 3.10, 4.8, 5.8, 6.10, 7.10** + +## Error Handling + +### Validation Errors + +All integration classes perform parameter validation at the start of execution, before creating any resources: + +1. **Required Settings Validation**: When an integration is enabled, required settings must be present in the settings hash +2. **Source-Path Consistency**: When a *_source parameter is provided, the corresponding path key must exist in the settings hash +3. **SSL Configuration Validation**: When SSL is enabled, all three SSL source parameters (ca, cert, key) should be provided together + +### Error Message Format + +Error messages follow this pattern: + +``` +pabawi::integrations::{integration_name}: {setting_key} is required when enabled is true +``` + +Example: + +``` +pabawi::integrations::ansible: settings['inventory_path'] is required when enabled is true +``` + +### Git Repository Cloning Errors + +When git repository cloning fails: + +- vcsrepo resource will fail with standard Puppet error +- Parent directory creation failures will prevent vcsrepo execution +- Invalid git URLs will cause vcsrepo provider errors + +### SSL Certificate Deployment Errors + +When SSL certificate deployment fails: + +- file:// URLs: Puppet will fail if source file doesn't exist +- https:// URLs: curl exec will fail if download fails +- Local paths: Puppet will fail if source file doesn't exist +- Invalid paths in settings hash will cause file resource failures + +### Type Transformation Errors + +The settings hash accepts any value types, but unexpected types may cause issues: + +- Hash values: Not explicitly handled, may cause concat fragment errors +- Complex nested 
structures: May not serialize correctly to .env format
- Recommendation: Use only String, Integer, Boolean, and Array types in settings hash

## Testing Strategy

### Dual Testing Approach

This refactoring requires both unit tests and property-based tests to ensure correctness:

**Unit Tests** focus on:

- Specific examples of integration configurations
- Edge cases (empty settings hash, missing required settings)
- Error conditions (invalid git URLs, missing SSL certificates)
- Integration points between classes and concat fragments
- Specific concat fragment order values
- Parameter interface validation (correct types, defaults)

**Property-Based Tests** focus on:

- Universal transformation rules (type-based value conversion)
- Settings hash prefix application across all integrations
- Git repository cloning behavior with various URLs and paths
- SSL certificate deployment with different source formats
- Validation error messages with various missing settings

### Property-Based Testing Configuration

We will use **rspec-puppet** with **rspec-puppet-facts** for property-based testing of Puppet code. While these are not traditional property-based testing libraries, we can achieve equivalent coverage by using parameterized tests with multiple fact sets and input combinations.
+ +**Test Configuration**: + +- Minimum 100 iterations per property test (achieved through parameterized test cases) +- Each property test references its design document property +- Tag format: `# Feature: puppet-pabawi-refactoring, Property {number}: {property_text}` + +**Example Property Test Structure**: + +```ruby +# Feature: puppet-pabawi-refactoring, Property 1: Settings Hash to Environment Variable Transformation +describe 'pabawi::integrations::ansible' do + [ + { input: ['cmd1', 'cmd2'], expected: '["cmd1","cmd2"]' }, + { input: true, expected: 'true' }, + { input: false, expected: 'false' }, + { input: 'string_value', expected: 'string_value' }, + { input: 12345, expected: '12345' }, + { input: nil, expected: 'not-set' }, + ].each do |test_case| + context "with settings value #{test_case[:input].inspect}" do + let(:params) do + { settings: { 'test_key' => test_case[:input] } } + end + + it 'transforms value correctly in concat fragment' do + is_expected.to contain_concat__fragment('pabawi_env_ansible') + .with_content(/ANSIBLE_TEST_KEY=#{Regexp.escape(test_case[:expected])}/) + end + end + end +end +``` + +### Unit Test Coverage Areas + +1. **SSH Integration Class**: + - Class exists and is properly defined + - Accepts settings hash parameter + - Accepts enabled parameter with default true + - Writes SSH_ENABLED to .env when enabled + - Uses concat fragment order 25 + - Writes settings with SSH_ prefix + +2. **Command Whitelist Relocation**: + - bolt.pp does not have command_whitelist parameters + - docker.pp accepts command_whitelist and command_whitelist_allow_all + - nginx.pp accepts command_whitelist and command_whitelist_allow_all + - docker.pp writes COMMAND_WHITELIST as JSON array + - docker.pp writes COMMAND_WHITELIST_ALLOW_ALL as boolean + +3. 
**Ansible Integration Refactoring**: + - Accepts settings hash parameter + - Accepts inventory_source and playbook_source parameters + - Creates vcsrepo resources when sources provided + - Writes settings with ANSIBLE_ prefix + - Uses concat fragment order 24 + +4. **Bolt Integration Refactoring**: + - Accepts settings hash parameter + - Accepts project_path_source parameter + - Creates vcsrepo resource when source provided + - Writes settings with BOLT_ prefix + - Uses concat fragment order 20 + - Does not include command_whitelist parameters + +5. **Hiera Integration Refactoring**: + - Accepts settings hash parameter + - Accepts control_repo_source parameter + - Creates vcsrepo resource when source provided + - Writes settings with HIERA_ prefix + - Uses concat fragment order 23 + +6. **PuppetDB Integration Refactoring**: + - Accepts settings hash parameter + - Accepts ssl_ca_source, ssl_cert_source, ssl_key_source parameters + - Deploys SSL certificates when sources provided + - Supports file://, https://, and local path formats + - Writes settings with PUPPETDB_ prefix + - Uses concat fragment order 21 + +7. 
**PuppetServer Integration Refactoring**: + - Accepts settings hash parameter + - Accepts ssl_ca_source, ssl_cert_source, ssl_key_source parameters + - Deploys SSL certificates when sources provided + - Supports file://, https://, and local path formats + - Writes settings with PUPPETSERVER_ prefix + - Uses concat fragment order 22 + +### Integration Testing + +Integration tests should verify: + +- Complete .env file generation with multiple integrations enabled +- Concat fragment ordering produces correct file structure +- Docker container receives correct .env file +- Git repositories are cloned to correct locations +- SSL certificates are deployed with correct permissions +- Command whitelist is properly passed to Docker container + +### Test Execution + +Tests should be run with minimal verbosity: + +```bash +# Run all tests +bundle exec rake spec + +# Run specific integration tests +bundle exec rspec spec/classes/integrations/ssh_spec.rb + +# Run with specific fact sets +SPEC_FACTS_OS=ubuntu-20.04-x86_64 bundle exec rake spec +``` + +### Backward Compatibility Testing + +Since this is a refactoring, we need to ensure backward compatibility: + +- Test that existing configurations still work (with deprecation warnings if needed) +- Verify that default values maintain current behavior +- Check that .env file format remains unchanged +- Ensure concat fragment ordering is preserved diff --git a/.kiro/specs/090/puppet-pabawi-refactoring/requirements.md b/.kiro/specs/090/puppet-pabawi-refactoring/requirements.md new file mode 100644 index 00000000..e0e8dfbb --- /dev/null +++ b/.kiro/specs/090/puppet-pabawi-refactoring/requirements.md @@ -0,0 +1,163 @@ +# Requirements Document + +## Introduction + +This document specifies requirements for refactoring the puppet-pabawi module to improve configuration flexibility, standardize parameter handling across integrations, add SSH integration support, and properly scope command whitelisting parameters. 
The refactoring will introduce a settings hash pattern for Pabawi application configuration (values written to .env file) while maintaining regular class parameters for Puppet-specific management tasks (package management, file deployment, git repository sources). + +## Glossary + +- **Integration_Class**: A Puppet class in the manifests/integrations directory that configures Pabawi integration with external tools (Ansible, Bolt, Hiera, PuppetDB, PuppetServer, SSH) +- **Settings_Hash**: A Hash parameter containing Pabawi application configuration that gets written to the .env file (e.g., paths used by the app, timeouts, URLs, ports) +- **Source_Parameter**: A regular class parameter used by Puppet to manage file deployment or git repository cloning (e.g., inventory_source, ssl_ca_source) +- **Command_Whitelist**: An array of allowed commands for execution control +- **Environment_File**: The .env file generated by concat fragments containing integration configuration +- **Docker_Class**: The manifests/install/docker.pp class that manages Docker-based Pabawi installation +- **Nginx_Class**: The manifests/proxy/nginx.pp class that manages nginx reverse proxy configuration +- **Bolt_Integration**: The manifests/integrations/bolt.pp class for Puppet Bolt integration +- **SSH_Integration**: A new integration class for SSH-based operations + +## Requirements + +### Requirement 1: SSH Integration Support + +**User Story:** As a Pabawi administrator, I want to configure SSH integration, so that I can execute commands on remote systems via SSH. + +#### Acceptance Criteria + +1. THE Module SHALL provide an SSH_Integration class at manifests/integrations/ssh.pp +2. THE SSH_Integration SHALL accept a Settings_Hash parameter for configuration +3. THE SSH_Integration SHALL accept an enabled Boolean parameter with default value true +4. WHEN enabled is true, THE SSH_Integration SHALL write SSH_ENABLED=true to the Environment_File +5. 
THE SSH_Integration SHALL write all Settings_Hash key-value pairs to the Environment_File with SSH_ prefix +6. THE SSH_Integration SHALL use concat fragment order 25 for Environment_File integration + +### Requirement 2: Command Whitelist Parameter Relocation + +**User Story:** As a Pabawi administrator, I want command whitelisting parameters in the classes that actually use them, so that the configuration is more intuitive and maintainable. + +#### Acceptance Criteria + +1. THE Bolt_Integration SHALL NOT include command_whitelist or command_whitelist_allow_all parameters +2. THE Docker_Class SHALL accept a command_whitelist Array parameter with default empty array +3. THE Docker_Class SHALL accept a command_whitelist_allow_all Boolean parameter with default value false +4. THE Nginx_Class SHALL accept a command_whitelist Array parameter with default empty array +5. THE Nginx_Class SHALL accept a command_whitelist_allow_all Boolean parameter with default value false +6. WHEN Docker_Class writes to Environment_File, THE Docker_Class SHALL include COMMAND_WHITELIST as JSON array +7. WHEN Docker_Class writes to Environment_File, THE Docker_Class SHALL include COMMAND_WHITELIST_ALLOW_ALL as Boolean +8. THE Nginx_Class SHALL write COMMAND_WHITELIST and COMMAND_WHITELIST_ALLOW_ALL to nginx configuration context + +### Requirement 3: Ansible Integration Settings Hash + +**User Story:** As a Pabawi administrator, I want to configure Ansible integration using a settings hash for application configuration and regular parameters for Puppet management, so that the distinction between app config and infrastructure management is clear. + +#### Acceptance Criteria + +1. THE Ansible_Integration SHALL accept a Settings_Hash parameter for Pabawi application configuration +2. THE Ansible_Integration SHALL accept inventory_source as a regular class parameter for git repository URL +3. 
THE Ansible_Integration SHALL accept playbook_source as a regular class parameter for git repository URL +4. THE Ansible_Integration SHALL accept a manage_package Boolean parameter with default value false +5. THE Ansible_Integration SHALL accept an enabled Boolean parameter with default value true +6. THE Settings_Hash SHALL support keys: inventory_path, playbook_path, execution_timeout, config +7. WHEN inventory_source parameter is provided, THE Ansible_Integration SHALL clone the git repository to the path specified in Settings_Hash inventory_path +8. WHEN playbook_source parameter is provided, THE Ansible_Integration SHALL clone the git repository to the path specified in Settings_Hash playbook_path +9. FOR ALL Settings_Hash key-value pairs, THE Ansible_Integration SHALL write them to Environment_File with ANSIBLE_ prefix +10. THE Ansible_Integration SHALL use concat fragment order 24 for Environment_File integration + +### Requirement 4: Bolt Integration Settings Hash + +**User Story:** As a Pabawi administrator, I want to configure Bolt integration using a settings hash for application configuration and regular parameters for Puppet management, so that the distinction between app config and infrastructure management is clear. + +#### Acceptance Criteria + +1. THE Bolt_Integration SHALL accept a Settings_Hash parameter for Pabawi application configuration +2. THE Bolt_Integration SHALL accept project_path_source as a regular class parameter for git repository URL +3. THE Bolt_Integration SHALL accept a manage_package Boolean parameter with default value false +4. THE Bolt_Integration SHALL accept an enabled Boolean parameter with default value true +5. THE Settings_Hash SHALL support keys: project_path, execution_timeout +6. WHEN project_path_source parameter is provided, THE Bolt_Integration SHALL clone the git repository to the path specified in Settings_Hash project_path +7. 
FOR ALL Settings_Hash key-value pairs, THE Bolt_Integration SHALL write them to Environment_File with BOLT_ prefix +8. THE Bolt_Integration SHALL use concat fragment order 20 for Environment_File integration + +### Requirement 5: Hiera Integration Settings Hash + +**User Story:** As a Pabawi administrator, I want to configure Hiera integration using a settings hash for application configuration and regular parameters for Puppet management, so that the distinction between app config and infrastructure management is clear. + +#### Acceptance Criteria + +1. THE Hiera_Integration SHALL accept a Settings_Hash parameter for Pabawi application configuration +2. THE Hiera_Integration SHALL accept control_repo_source as a regular class parameter for git repository URL +3. THE Hiera_Integration SHALL accept a manage_package Boolean parameter with default value false +4. THE Hiera_Integration SHALL accept an enabled Boolean parameter with default value true +5. THE Settings_Hash SHALL support keys: control_repo_path, config_path, environments, fact_source_prefer_puppetdb, fact_source_local_path +6. WHEN control_repo_source parameter is provided, THE Hiera_Integration SHALL clone the git repository to the path specified in Settings_Hash control_repo_path +7. FOR ALL Settings_Hash key-value pairs, THE Hiera_Integration SHALL write them to Environment_File with HIERA_ prefix +8. THE Hiera_Integration SHALL use concat fragment order 23 for Environment_File integration + +### Requirement 6: PuppetDB Integration Settings Hash + +**User Story:** As a Pabawi administrator, I want to configure PuppetDB integration using a settings hash for application configuration and regular parameters for Puppet file deployment, so that the distinction between app config and infrastructure management is clear. + +#### Acceptance Criteria + +1. THE PuppetDB_Integration SHALL accept a Settings_Hash parameter for Pabawi application configuration +2. 
THE PuppetDB_Integration SHALL accept ssl_ca_source as a regular class parameter for Puppet file source +3. THE PuppetDB_Integration SHALL accept ssl_cert_source as a regular class parameter for Puppet file source +4. THE PuppetDB_Integration SHALL accept ssl_key_source as a regular class parameter for Puppet file source +5. THE PuppetDB_Integration SHALL accept an enabled Boolean parameter with default value true +6. THE Settings_Hash SHALL support keys: server_url, port, ssl_enabled, ssl_ca, ssl_cert, ssl_key, ssl_reject_unauthorized +7. WHEN ssl_ca_source, ssl_cert_source, or ssl_key_source parameters are provided, THE PuppetDB_Integration SHALL deploy SSL certificates to the paths specified in Settings_Hash (ssl_ca, ssl_cert, ssl_key) +8. THE PuppetDB_Integration SHALL support file://, https://, and local path formats for SSL certificate source parameters +9. FOR ALL Settings_Hash key-value pairs, THE PuppetDB_Integration SHALL write them to Environment_File with PUPPETDB_ prefix +10. THE PuppetDB_Integration SHALL use concat fragment order 21 for Environment_File integration + +### Requirement 7: PuppetServer Integration Settings Hash + +**User Story:** As a Pabawi administrator, I want to configure PuppetServer integration using a settings hash for application configuration and regular parameters for Puppet file deployment, so that the distinction between app config and infrastructure management is clear. + +#### Acceptance Criteria + +1. THE PuppetServer_Integration SHALL accept a Settings_Hash parameter for Pabawi application configuration +2. THE PuppetServer_Integration SHALL accept ssl_ca_source as a regular class parameter for Puppet file source +3. THE PuppetServer_Integration SHALL accept ssl_cert_source as a regular class parameter for Puppet file source +4. THE PuppetServer_Integration SHALL accept ssl_key_source as a regular class parameter for Puppet file source +5. 
THE PuppetServer_Integration SHALL accept an enabled Boolean parameter with default value true +6. THE Settings_Hash SHALL support keys: server_url, port, ssl_enabled, ssl_ca, ssl_cert, ssl_key, ssl_reject_unauthorized, inactivity_threshold, cache_ttl, circuit_breaker_threshold, circuit_breaker_timeout, circuit_breaker_reset_timeout +7. WHEN ssl_ca_source, ssl_cert_source, or ssl_key_source parameters are provided, THE PuppetServer_Integration SHALL deploy SSL certificates to the paths specified in Settings_Hash (ssl_ca, ssl_cert, ssl_key) +8. THE PuppetServer_Integration SHALL support file://, https://, and local path formats for SSL certificate source parameters +9. FOR ALL Settings_Hash key-value pairs, THE PuppetServer_Integration SHALL write them to Environment_File with PUPPETSERVER_ prefix +10. THE PuppetServer_Integration SHALL use concat fragment order 22 for Environment_File integration + +### Requirement 8: Settings Hash Validation + +**User Story:** As a Pabawi administrator, I want clear error messages when required settings are missing, so that I can quickly identify and fix configuration issues. + +#### Acceptance Criteria + +1. WHEN an Integration_Class is enabled and required settings are missing from Settings_Hash, THE Integration_Class SHALL fail with a descriptive error message +2. THE error message SHALL specify which setting key is required +3. THE error message SHALL specify which Integration_Class generated the error +4. FOR ALL Integration_Classes, validation SHALL occur before any resources are created + +### Requirement 9: Settings Hash to Environment Variable Transformation + +**User Story:** As a Pabawi administrator, I want settings hash values properly formatted in the environment file, so that the application can parse them correctly. + +#### Acceptance Criteria + +1. WHEN a Settings_Hash value is an Array, THE Integration_Class SHALL convert it to JSON format in the Environment_File +2. 
WHEN a Settings_Hash value is a Boolean, THE Integration_Class SHALL write it as lowercase true or false +3. WHEN a Settings_Hash value is a String, THE Integration_Class SHALL write it as-is +4. WHEN a Settings_Hash value is an Integer, THE Integration_Class SHALL write it as a string representation +5. WHEN a Settings_Hash value is undef or empty, THE Integration_Class SHALL write 'not-set' to the Environment_File + +### Requirement 10: Git Repository Management + +**User Story:** As a Pabawi administrator, I want integration classes to automatically clone git repositories from source parameters, so that I can manage configuration as code. + +#### Acceptance Criteria + +1. WHEN a Source_Parameter (inventory_source, playbook_source, project_path_source, control_repo_source) contains a git URL, THE Integration_Class SHALL use vcsrepo to clone the repository +2. THE Integration_Class SHALL create parent directories before cloning repositories +3. THE Integration_Class SHALL use the corresponding Settings_Hash path key as the clone destination (e.g., inventory_source clones to inventory_path from Settings_Hash) +4. WHEN a repository is already cloned, THE Integration_Class SHALL ensure it remains present +5. THE vcsrepo resource SHALL require the parent directory creation exec resource diff --git a/.kiro/specs/090/puppet-pabawi-refactoring/tasks.md b/.kiro/specs/090/puppet-pabawi-refactoring/tasks.md new file mode 100644 index 00000000..3ee88228 --- /dev/null +++ b/.kiro/specs/090/puppet-pabawi-refactoring/tasks.md @@ -0,0 +1,236 @@ +# Implementation Plan: Puppet-Pabawi Refactoring + +## Overview + +This implementation plan refactors the puppet-pabawi module to introduce a consistent settings hash pattern across all integration classes, add SSH integration support, and relocate command whitelist parameters to the classes that actually use them. 
The refactoring separates Pabawi application configuration (written to .env file) from Puppet infrastructure management (package installation, file deployment, git repository cloning). + +## Tasks + +- [ ] 1. Create SSH integration class + - [x] 1.1 Implement manifests/integrations/ssh.pp with settings hash pattern + - Create class with enabled and settings parameters + - Implement concat fragment for .env file with SSH_ prefix + - Use concat fragment order 25 + - Write SSH_ENABLED when enabled is true + - _Requirements: 1.1, 1.2, 1.3, 1.4, 1.5, 1.6_ + + - [ ]* 1.2 Write unit tests for SSH integration + - Test class parameter interface + - Test concat fragment creation and ordering + - Test SSH_ENABLED environment variable + - Test settings hash prefix application + - _Requirements: 1.1, 1.2, 1.3, 1.4, 1.5, 1.6_ + + - [ ]* 1.3 Write property test for SSH settings transformation + - **Property 1: Settings Hash to Environment Variable Transformation** + - **Property 2: Settings Hash Prefix Application** + - **Validates: Requirements 1.5, 9.1, 9.2, 9.3, 9.4, 9.5** + +- [ ] 2. 
Refactor Ansible integration class + - [x] 2.1 Update manifests/integrations/ansible.pp with settings hash pattern + - Add settings hash parameter + - Rename source parameters (inventory_source, playbook_source) + - Implement git repository cloning with vcsrepo + - Create parent directory exec resources before vcsrepo + - Update concat fragment to use settings hash with ANSIBLE_ prefix + - Maintain concat fragment order 24 + - _Requirements: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 3.10_ + + - [ ]* 2.2 Write unit tests for Ansible integration + - Test settings hash parameter interface + - Test git repository cloning with inventory_source and playbook_source + - Test parent directory creation + - Test concat fragment with ANSIBLE_ prefix + - Test manage_package parameter + - _Requirements: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 3.10_ + + - [ ]* 2.3 Write property tests for Ansible integration + - **Property 3: Git Repository Cloning with Source Parameters** + - **Property 4: Git Repository Resource Dependencies** + - **Validates: Requirements 3.7, 3.8, 10.1, 10.2, 10.3, 10.4, 10.5** + +- [ ] 3. 
Refactor Bolt integration class + - [x] 3.1 Update manifests/integrations/bolt.pp with settings hash pattern + - Add settings hash parameter + - Remove command_whitelist and command_whitelist_allow_all parameters + - Rename project_path_source parameter + - Implement git repository cloning with vcsrepo + - Create parent directory exec resource before vcsrepo + - Update concat fragment to use settings hash with BOLT_ prefix + - Maintain concat fragment order 20 + - _Requirements: 2.1, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8_ + + - [ ]* 3.2 Write unit tests for Bolt integration + - Test settings hash parameter interface + - Test git repository cloning with project_path_source + - Test parent directory creation + - Test concat fragment with BOLT_ prefix + - Verify command_whitelist parameters are removed + - _Requirements: 2.1, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8_ + + - [ ]* 3.3 Write property tests for Bolt integration + - **Property 3: Git Repository Cloning with Source Parameters** + - **Property 4: Git Repository Resource Dependencies** + - **Validates: Requirements 4.6, 10.1, 10.2, 10.3, 10.4, 10.5** + +- [ ] 4. 
Refactor Hiera integration class + - [x] 4.1 Update manifests/integrations/hiera.pp with settings hash pattern + - Add settings hash parameter + - Rename control_repo_source parameter + - Implement git repository cloning with vcsrepo + - Create parent directory exec resource before vcsrepo + - Update concat fragment to use settings hash with HIERA_ prefix + - Maintain concat fragment order 23 + - _Requirements: 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8_ + + - [ ]* 4.2 Write unit tests for Hiera integration + - Test settings hash parameter interface + - Test git repository cloning with control_repo_source + - Test parent directory creation + - Test concat fragment with HIERA_ prefix + - Test array settings transformation (environments) + - _Requirements: 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8_ + + - [ ]* 4.3 Write property tests for Hiera integration + - **Property 3: Git Repository Cloning with Source Parameters** + - **Property 4: Git Repository Resource Dependencies** + - **Validates: Requirements 5.6, 10.1, 10.2, 10.3, 10.4, 10.5** + +- [x] 5. Checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. + +- [ ] 6. 
Refactor PuppetDB integration class + - [x] 6.1 Update manifests/integrations/puppetdb.pp with settings hash pattern + - Add settings hash parameter + - Add ssl_ca_source, ssl_cert_source, ssl_key_source parameters + - Implement SSL certificate deployment with file resources + - Support file://, https://, and local path formats for SSL sources + - Update concat fragment to use settings hash with PUPPETDB_ prefix + - Maintain concat fragment order 21 + - _Requirements: 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 6.10_ + + - [ ]* 6.2 Write unit tests for PuppetDB integration + - Test settings hash parameter interface + - Test SSL certificate deployment with various source formats + - Test file permissions (0644 for ca/cert, 0600 for key) + - Test concat fragment with PUPPETDB_ prefix + - Test boolean settings transformation (ssl_enabled, ssl_reject_unauthorized) + - _Requirements: 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 6.10_ + + - [ ]* 6.3 Write property tests for PuppetDB integration + - **Property 5: SSL Certificate Deployment** + - **Property 6: SSL Certificate File Permissions** + - **Validates: Requirements 6.7, 6.8** + +- [ ] 7. 
Refactor PuppetServer integration class + - [x] 7.1 Update manifests/integrations/puppetserver.pp with settings hash pattern + - Add settings hash parameter + - Add ssl_ca_source, ssl_cert_source, ssl_key_source parameters + - Implement SSL certificate deployment with file resources + - Support file://, https://, and local path formats for SSL sources + - Update concat fragment to use settings hash with PUPPETSERVER_ prefix + - Maintain concat fragment order 22 + - _Requirements: 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 7.8, 7.9, 7.10_ + + - [ ]* 7.2 Write unit tests for PuppetServer integration + - Test settings hash parameter interface + - Test SSL certificate deployment with various source formats + - Test file permissions (0644 for ca/cert, 0600 for key) + - Test concat fragment with PUPPETSERVER_ prefix + - Test integer settings transformation (timeouts, thresholds) + - _Requirements: 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 7.8, 7.9, 7.10_ + + - [ ]* 7.3 Write property tests for PuppetServer integration + - **Property 5: SSL Certificate Deployment** + - **Property 6: SSL Certificate File Permissions** + - **Validates: Requirements 7.7, 7.8** + +- [ ] 8. 
Update Docker class with command whitelist parameters + - [x] 8.1 Update manifests/install/docker.pp to add command whitelist parameters + - Add command_whitelist Array parameter with default empty array + - Add command_whitelist_allow_all Boolean parameter with default false + - Update concat fragment to write COMMAND_WHITELIST as JSON array + - Update concat fragment to write COMMAND_WHITELIST_ALLOW_ALL as boolean + - Maintain concat fragment order 10 + - _Requirements: 2.2, 2.3, 2.6, 2.7_ + + - [ ]* 8.2 Write unit tests for Docker class command whitelist + - Test command_whitelist parameter interface + - Test command_whitelist_allow_all parameter interface + - Test COMMAND_WHITELIST JSON array transformation + - Test COMMAND_WHITELIST_ALLOW_ALL boolean transformation + - _Requirements: 2.2, 2.3, 2.6, 2.7_ + + - [ ]* 8.3 Write property test for command whitelist transformation + - **Property 1: Settings Hash to Environment Variable Transformation** + - **Validates: Requirements 2.6, 2.7, 9.1, 9.2** + +- [ ] 9. Update Nginx class with command whitelist parameters + - [x] 9.1 Update manifests/proxy/nginx.pp to add command whitelist parameters + - Add command_whitelist Array parameter with default empty array + - Add command_whitelist_allow_all Boolean parameter with default false + - Update nginx configuration template to use command whitelist + - _Requirements: 2.4, 2.5, 2.8_ + + - [ ]* 9.2 Write unit tests for Nginx class command whitelist + - Test command_whitelist parameter interface + - Test command_whitelist_allow_all parameter interface + - Test nginx configuration template includes whitelist + - _Requirements: 2.4, 2.5, 2.8_ + +- [x] 10. Checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. + +- [ ] 11. 
Implement settings validation across all integration classes + - [x] 11.1 Add validation logic to all integration classes + - Implement validation for required settings when enabled is true + - Implement source-path consistency validation + - Implement SSL configuration validation (all three SSL sources together) + - Generate descriptive error messages with integration name and setting key + - Ensure validation occurs before resource creation + - _Requirements: 8.1, 8.2, 8.3, 8.4_ + + - [ ]* 11.2 Write unit tests for settings validation + - Test validation errors for missing required settings + - Test error message format includes integration name and setting key + - Test validation occurs before resource creation + - Test source-path consistency validation + - Test SSL configuration validation + - _Requirements: 8.1, 8.2, 8.3, 8.4_ + + - [ ]* 11.3 Write property test for validation error messages + - **Property 7: Settings Validation with Descriptive Errors** + - **Validates: Requirements 8.1, 8.2, 8.3, 8.4** + +- [ ] 12. 
Implement universal property tests for all integrations + - [ ]* 12.1 Write property test for enabled parameter behavior + - **Property 8: Enabled Integration Environment Variable** + - Test across all integration classes (SSH, Ansible, Bolt, Hiera, PuppetDB, PuppetServer) + - **Validates: Requirements 1.4, 3.5, 4.4, 5.4, 6.5, 7.5** + + - [ ]* 12.2 Write property test for concat fragment ordering + - **Property 9: Concat Fragment Ordering Consistency** + - Test all integration classes use correct order numbers + - **Validates: Requirements 1.6, 3.10, 4.8, 5.8, 6.10, 7.10** + + - [ ]* 12.3 Write property test for settings hash transformation across all integrations + - **Property 1: Settings Hash to Environment Variable Transformation** + - **Property 2: Settings Hash Prefix Application** + - Test with various data types (String, Integer, Boolean, Array, undef) + - Test across all integration classes + - **Validates: Requirements 9.1, 9.2, 9.3, 9.4, 9.5** + +- [x] 13. Final checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. 
+ +## Notes + +- Tasks marked with `*` are optional and can be skipped for faster MVP +- Each task references specific requirements for traceability +- Checkpoints ensure incremental validation at reasonable breaks +- Property tests validate universal correctness properties across all integrations +- Unit tests validate specific examples, edge cases, and integration-specific behavior +- All integration classes follow the same settings hash pattern for consistency +- Git repository cloning requires parent directory creation first (Property 4) +- SSL certificate deployment supports multiple source formats (Property 5) +- Settings hash values are transformed based on type when written to .env (Property 1) diff --git a/.kiro/specs/rbac-authorization/.config.kiro b/.kiro/specs/090/rbac-authorization/.config.kiro similarity index 100% rename from .kiro/specs/rbac-authorization/.config.kiro rename to .kiro/specs/090/rbac-authorization/.config.kiro diff --git a/.kiro/specs/rbac-authorization/design.md b/.kiro/specs/090/rbac-authorization/design.md similarity index 100% rename from .kiro/specs/rbac-authorization/design.md rename to .kiro/specs/090/rbac-authorization/design.md diff --git a/.kiro/specs/rbac-authorization/requirements.md b/.kiro/specs/090/rbac-authorization/requirements.md similarity index 100% rename from .kiro/specs/rbac-authorization/requirements.md rename to .kiro/specs/090/rbac-authorization/requirements.md diff --git a/.kiro/specs/rbac-authorization/tasks.md b/.kiro/specs/090/rbac-authorization/tasks.md similarity index 100% rename from .kiro/specs/rbac-authorization/tasks.md rename to .kiro/specs/090/rbac-authorization/tasks.md diff --git a/.kiro/specs/ssh-integration/.config.kiro b/.kiro/specs/090/ssh-integration/.config.kiro similarity index 100% rename from .kiro/specs/ssh-integration/.config.kiro rename to .kiro/specs/090/ssh-integration/.config.kiro diff --git a/.kiro/specs/ssh-integration/design.md b/.kiro/specs/090/ssh-integration/design.md 
similarity index 100% rename from .kiro/specs/ssh-integration/design.md rename to .kiro/specs/090/ssh-integration/design.md diff --git a/.kiro/specs/ssh-integration/requirements.md b/.kiro/specs/090/ssh-integration/requirements.md similarity index 100% rename from .kiro/specs/ssh-integration/requirements.md rename to .kiro/specs/090/ssh-integration/requirements.md diff --git a/.kiro/specs/ssh-integration/tasks.md b/.kiro/specs/090/ssh-integration/tasks.md similarity index 100% rename from .kiro/specs/ssh-integration/tasks.md rename to .kiro/specs/090/ssh-integration/tasks.md diff --git a/.kiro/specs/azure-support/.config.kiro b/.kiro/specs/azure-support/.config.kiro new file mode 100644 index 00000000..85ad8938 --- /dev/null +++ b/.kiro/specs/azure-support/.config.kiro @@ -0,0 +1 @@ +{"specId": "de78ec71-9d3d-416b-a6d0-15bb727e8bf5", "workflowType": "requirements-first", "specType": "feature"} diff --git a/.kiro/specs/azure-support/design.md b/.kiro/specs/azure-support/design.md new file mode 100644 index 00000000..53577b9a --- /dev/null +++ b/.kiro/specs/azure-support/design.md @@ -0,0 +1,508 @@ +# Documento di Design — Supporto Azure + +## Panoramica + +Questo documento descrive il design tecnico per l'integrazione di Microsoft Azure in Pabawi. Il plugin Azure seguirà gli stessi pattern architetturali già consolidati nel progetto (AWS, Proxmox), estendendo `BasePlugin` e implementando le interfacce `InformationSourcePlugin` e `ExecutionToolPlugin` per fornire inventario VM, raggruppamento, facts e gestione del ciclo di vita delle macchine virtuali Azure. + +L'integrazione utilizza l'Azure SDK per JavaScript (`@azure/arm-compute`, `@azure/arm-network`, `@azure/identity`, `@azure/arm-subscriptions`) per comunicare con le API Azure tramite autenticazione Service Principal. + +### Decisioni di design chiave + +1. 
**Pattern Service/Plugin separati**: Come per AWS, la logica Azure SDK è incapsulata in `AzureService` (chiamate API), mentre `AzurePlugin` gestisce il ciclo di vita del plugin e l'integrazione con Pabawi. +2. **Autenticazione Service Principal**: Si utilizza `ClientSecretCredential` da `@azure/identity`, coerente con scenari server-side non interattivi. +3. **Filtri opzionali**: Resource Group e Region sono filtri opzionali applicati a livello di `AzureService` per limitare lo scope dell'inventario. +4. **Degradazione graduale**: Il plugin restituisce dati vuoti in caso di errore, senza interrompere le altre integrazioni, seguendo il pattern di `BasePlugin`. + +## Architettura + +### Diagramma dei componenti + +```mermaid +graph TB + subgraph Pabawi Backend + CM[ConfigService] -->|azure config| IM[IntegrationManager] + IM -->|register| AP[AzurePlugin] + AP -->|delega| AS[AzureService] + IM -->|link nodes| NLS[NodeLinkingService] + end + + subgraph Azure SDK + AS -->|ClientSecretCredential| AI[@azure/identity] + AS -->|VM operations| AC[@azure/arm-compute] + AS -->|Network info| AN[@azure/arm-network] + AS -->|Subscription info| ASub[@azure/arm-subscriptions] + end + + subgraph Azure Cloud + AC -->|REST API| AZ[Azure Resource Manager] + AN -->|REST API| AZ + ASub -->|REST API| AZ + AI -->|OAuth2| AAD[Azure AD / Entra ID] + end +``` + +### Flusso di inizializzazione + +```mermaid +sequenceDiagram + participant CS as ConfigService + participant IM as IntegrationManager + participant AP as AzurePlugin + participant AS as AzureService + participant AZ as Azure API + + CS->>IM: azure config (enabled: true) + IM->>AP: initialize(config) + AP->>AP: validateAzureConfig() + AP->>AS: new AzureService(config) + AS->>AZ: ClientSecretCredential.getToken() + alt Autenticazione OK + AZ-->>AS: token + AS-->>AP: initialized + AP-->>IM: initialized: true + else Autenticazione fallita + AZ-->>AS: errore + AS-->>AP: errore + AP->>AP: log errore, initialized: false + AP-->>IM: 
initialized: false (no throw) + end +``` + +### Flusso di inventario + +```mermaid +sequenceDiagram + participant IM as IntegrationManager + participant AP as AzurePlugin + participant AS as AzureService + participant AZ as Azure API + + IM->>AP: getInventory() + AP->>AS: getInventory() + AS->>AZ: computeClient.virtualMachines.listAll() + AZ-->>AS: VM[] + AS->>AZ: networkClient.publicIPAddresses.listAll() + AZ-->>AS: PublicIP[] + AS->>AS: transformVMToNode(vm) + AS-->>AP: Node[] + AP-->>IM: Node[] +``` + +## Componenti e Interfacce + +### 1. Configurazione Azure nel ConfigService + +Estensione del metodo `parseIntegrationsConfig()` in `ConfigService.ts` per includere la configurazione Azure. + +```typescript +// Aggiunta al tipo di ritorno di parseIntegrationsConfig() +azure?: { + enabled: boolean; + tenantId: string; + clientId: string; + clientSecret: string; + subscriptionId: string; + resourceGroup?: string; + region?: string; + priority?: number; +}; +``` + +**Variabili d'ambiente:** + +| Variabile | Obbligatoria | Descrizione | +|---|---|---| +| `AZURE_ENABLED` | Sì | Abilita il plugin (`true`/`false`) | +| `AZURE_TENANT_ID` | Sì (se enabled) | ID del tenant Azure AD | +| `AZURE_CLIENT_ID` | Sì (se enabled) | Client ID del Service Principal | +| `AZURE_CLIENT_SECRET` | Sì (se enabled) | Client Secret del Service Principal | +| `AZURE_SUBSCRIPTION_ID` | Sì (se enabled) | ID della sottoscrizione Azure | +| `AZURE_RESOURCE_GROUP` | No | Filtra per Resource Group | +| `AZURE_REGION` | No | Filtra per regione Azure | +| `AZURE_PRIORITY` | No | Priorità nell'aggregazione inventario | + +**Validazione:** Se `AZURE_ENABLED=true`, `AZURE_TENANT_ID` e `AZURE_SUBSCRIPTION_ID` sono obbligatori. L'assenza genera un errore con messaggio specifico. + +### 2. AzureService (`backend/src/integrations/azure/AzureService.ts`) + +Servizio che incapsula tutte le chiamate all'Azure SDK. Segue lo stesso pattern di `AWSService`. 
+
+```typescript
+class AzureService {
+  constructor(config: AzureConfig, logger: LoggerService);
+
+  // Autenticazione
+  async validateCredentials(): Promise<{ subscriptionName: string; subscriptionId: string }>;
+
+  // Inventario
+  async getInventory(): Promise<Node[]>;
+  async getGroups(): Promise<NodeGroup[]>;
+  async getNodeFacts(nodeId: string): Promise<Facts>;
+  async getNodeData(nodeId: string, dataType: string): Promise<unknown>;
+
+  // Ciclo di vita VM
+  async startVM(resourceGroup: string, vmName: string): Promise<void>;
+  async stopVM(resourceGroup: string, vmName: string): Promise<void>;
+  async deallocateVM(resourceGroup: string, vmName: string): Promise<void>;
+  async restartVM(resourceGroup: string, vmName: string): Promise<void>;
+}
+```
+
+**Metodi privati chiave:**
+
+- `listAllVMs()`: Elenca le VM, applicando filtri opzionali per Resource Group e Region
+- `transformVMToNode(vm, networkInfo)`: Mappa una VM Azure a un oggetto `Node`
+- `transformToFacts(nodeId, vm, networkInfo)`: Mappa una VM Azure a un oggetto `Facts`
+- `resolveVMIPAddress(vm)`: Risolve l'IP privato/pubblico di una VM tramite le interfacce di rete
+- `groupByResourceGroup(nodes)`: Raggruppa i nodi per Resource Group
+- `groupByRegion(nodes)`: Raggruppa i nodi per regione
+- `groupByTags(nodes)`: Raggruppa i nodi per tag Azure
+- `parseNodeId(nodeId)`: Estrae resourceGroup e vmName da un nodeId nel formato `azure:<resourceGroup>:<vmName>`
+
+### 3. AzurePlugin (`backend/src/integrations/azure/AzurePlugin.ts`)
+
+Plugin che estende `BasePlugin` e implementa `InformationSourcePlugin` e `ExecutionToolPlugin`.
+
+```typescript
+class AzurePlugin extends BasePlugin implements InformationSourcePlugin, ExecutionToolPlugin {
+  type: "both" = "both";
+
+  constructor(logger?: LoggerService, performanceMonitor?: PerformanceMonitorService);
+
+  // BasePlugin
+  protected performInitialization(): Promise<void>;
+  protected performHealthCheck(): Promise<Partial<HealthStatus>>;
+
+  // InformationSourcePlugin
+  async getInventory(): Promise<Node[]>;
+  async getGroups(): Promise<NodeGroup[]>;
+  async getNodeFacts(nodeId: string): Promise<Facts>;
+  async getNodeData(nodeId: string, dataType: string): Promise<unknown>;
+
+  // ExecutionToolPlugin
+  async executeAction(action: Action): Promise<ExecutionResult>;
+  listCapabilities(): Capability[];
+}
+```
+
+**Comportamento di degradazione:**
+
+- Se `initialized === false`, `getInventory()` e `getGroups()` restituiscono `[]`
+- Se l'API Azure fallisce durante una query, il plugin logga l'errore e restituisce dati vuoti
+- Le transizioni di stato `healthy: true ↔ false` vengono loggate con livello appropriato (warning/info)
+
+### 4. Tipi Azure (`backend/src/integrations/azure/types.ts`)
+
+```typescript
+export interface AzureConfig {
+  tenantId: string;
+  clientId: string;
+  clientSecret: string;
+  subscriptionId: string;
+  resourceGroup?: string;
+  region?: string;
+}
+
+export class AzureAuthenticationError extends Error {
+  constructor(message: string) {
+    super(message);
+    this.name = "AzureAuthenticationError";
+  }
+}
+```
+
+### 5. 
Integrazione con IntegrationManager
+
+La registrazione del plugin avviene nel punto di bootstrap dell'applicazione (dove vengono registrati gli altri plugin), seguendo lo stesso pattern:
+
+```typescript
+if (azureConfig?.enabled) {
+  const azurePlugin = new AzurePlugin(logger, performanceMonitor);
+  integrationManager.registerPlugin(azurePlugin, {
+    enabled: true,
+    name: "azure",
+    type: "both",
+    config: azureConfig,
+    priority: azureConfig.priority,
+  });
+}
+```
+
+## Modelli Dati
+
+### Mappatura VM Azure → Node
+
+| Campo Node | Sorgente Azure | Note |
+|---|---|---|
+| `id` | `azure:<resourceGroup>:<vmName>` | Formato univoco per identificare la VM |
+| `name` | `vm.name` | Nome della VM Azure |
+| `uri` | IP privato → IP pubblico → `vm.name` | Fallback a catena |
+| `transport` | `"ssh"` (Linux) / `"winrm"` (Windows) | Basato su `vm.storageProfile.osDisk.osType` |
+| `config` | `{}` | Configurazione di default |
+| `source` | `"azure"` | Identificatore sorgente |
+
+### Mappatura VM Azure → Facts
+
+```typescript
+{
+  nodeId: "azure:<resourceGroup>:<vmName>",
+  gatheredAt: "<timestamp ISO 8601>",
+  source: "azure",
+  facts: {
+    vmSize: "Standard_D2s_v3",
+    location: "westeurope",
+    provisioningState: "Succeeded",
+    powerState: "running",
+    osType: "Linux",
+    osDiskSizeGB: 30,
+    privateIpAddress: "10.0.0.4",
+    publicIpAddress: "20.1.2.3",
+    resourceGroup: "my-rg",
+    subscriptionId: "sub-id",
+    tags: { env: "production", team: "infra" },
+    os: { family: "Linux", name: "Ubuntu", release: { full: "22.04", major: "22" } },
+    processors: { count: 2, models: ["Standard_D2s_v3"] },
+    memory: { system: { total: "8 GB", available: "N/A" } },
+    networking: {
+      hostname: "my-vm",
+      interfaces: { eth0: { ip: "10.0.0.4", public: "20.1.2.3" } }
+    }
+  }
+}
+```
+
+### Formato NodeGroup
+
+| Tipo raggruppamento | Formato `id` | Esempio |
+|---|---|---|
+| Resource Group | `azure:rg:<resourceGroup>` | `azure:rg:my-resource-group` |
+| Regione | `azure:region:<region>` | `azure:region:westeurope` |
+| Tag | `azure:tag:<chiave>:<valore>` | `azure:tag:env:production` |
+
+### 
Formato nodeId
+
+Il `nodeId` per le VM Azure segue il formato: `azure:<resourceGroup>:<vmName>`
+
+Questo formato permette di:
+
+- Identificare univocamente la VM
+- Estrarre il Resource Group necessario per le operazioni di ciclo di vita
+- Mantenere coerenza con il pattern AWS (`aws:<region>:<instanceId>`)
+
+### Capability del plugin
+
+```typescript
+[
+  {
+    name: "start",
+    description: "Avvia una VM Azure",
+    parameters: [
+      { name: "target", type: "string", required: true, description: "nodeId della VM" }
+    ]
+  },
+  {
+    name: "stop",
+    description: "Arresta una VM Azure",
+    parameters: [
+      { name: "target", type: "string", required: true, description: "nodeId della VM" }
+    ]
+  },
+  {
+    name: "deallocate",
+    description: "Dealloca una VM Azure (rilascia risorse compute)",
+    parameters: [
+      { name: "target", type: "string", required: true, description: "nodeId della VM" }
+    ]
+  },
+  {
+    name: "restart",
+    description: "Riavvia una VM Azure",
+    parameters: [
+      { name: "target", type: "string", required: true, description: "nodeId della VM" }
+    ]
+  }
+]
+```
+
+## Proprietà di Correttezza
+
+*Una proprietà è una caratteristica o un comportamento che deve essere vero in tutte le esecuzioni valide di un sistema — essenzialmente, un'affermazione formale su ciò che il sistema deve fare. Le proprietà fungono da ponte tra le specifiche leggibili dall'uomo e le garanzie di correttezza verificabili dalla macchina.*
+
+### Proprietà 1: Round-trip della configurazione Azure
+
+*Per qualsiasi* insieme valido di variabili d'ambiente Azure (con `AZURE_ENABLED=true`, `tenantId`, `clientId`, `clientSecret`, `subscriptionId` obbligatori, e `resourceGroup`, `region`, `priority` opzionali), il parsing tramite `ConfigService.parseIntegrationsConfig()` deve produrre un oggetto `azure` i cui campi corrispondono esattamente ai valori delle variabili d'ambiente originali.
+ +**Valida: Requisiti 1.1, 1.4, 1.5, 1.6** + +### Proprietà 2: Configurazione disabilitata esclude Azure + +*Per qualsiasi* valore di `AZURE_ENABLED` diverso da `"true"` (incluso `undefined`, `"false"`, stringhe casuali), l'oggetto restituito da `parseIntegrationsConfig()` non deve contenere la chiave `azure`. + +**Valida: Requisiti 1.7** + +### Proprietà 3: Plugin non operativo restituisce dati vuoti senza eccezioni + +*Per qualsiasi* istanza di `AzurePlugin` che si trova nello stato `initialized: false` o `healthy: false`, le chiamate a `getInventory()` e `getGroups()` devono restituire array vuoti, e `getNodeFacts(nodeId)` deve restituire un oggetto `Facts` vuoto, senza lanciare eccezioni, indipendentemente dal `nodeId` fornito. + +**Valida: Requisiti 2.5, 7.2** + +### Proprietà 4: Mappatura VM → Node preserva source e campi obbligatori + +*Per qualsiasi* VM Azure restituita dall'API, la trasformazione in oggetto `Node` deve produrre un nodo con: `source` uguale a `"azure"`, `name` uguale al nome della VM, `id` nel formato `azure::`, e `uri` non vuoto (IP privato, IP pubblico, o nome VM come fallback). + +**Valida: Requisiti 3.1, 3.2, 9.3** + +### Proprietà 5: I filtri di inventario restituiscono solo VM corrispondenti + +*Per qualsiasi* insieme di VM Azure e qualsiasi combinazione di filtri attivi (`resourceGroup`, `region`), `getInventory()` deve restituire esclusivamente VM che soddisfano tutti i filtri configurati. Se `resourceGroup` è configurato, ogni VM restituita deve appartenere a quel Resource Group. Se `region` è configurata, ogni VM restituita deve trovarsi in quella regione. + +**Valida: Requisiti 3.3, 3.4** + +### Proprietà 6: Raggruppamento completo e coerente + +*Per qualsiasi* insieme di VM Azure, `getGroups()` deve restituire: un `NodeGroup` per ogni Resource Group distinto (con id `azure:rg:`), un `NodeGroup` per ogni regione distinta (con id `azure:region:`), un `NodeGroup` per ogni coppia tag chiave:valore distinta (con id `azure:tag::`). 
Ogni `NodeGroup` deve avere `source` uguale a `"azure"`, e ogni nodo deve apparire in esattamente un gruppo per Resource Group e un gruppo per regione. + +**Valida: Requisiti 4.1, 4.2, 4.3, 4.4** + +### Proprietà 7: Facts contengono tutti i campi richiesti + +*Per qualsiasi* VM Azure valida, `getNodeFacts(nodeId)` deve restituire un oggetto `Facts` contenente almeno i campi: `vmSize`, `location`, `provisioningState`, `powerState`, `osType`, `resourceGroup`, `subscriptionId`, e `tags`. + +**Valida: Requisiti 5.1** + +### Proprietà 8: Coerenza del risultato delle operazioni di ciclo di vita + +*Per qualsiasi* operazione di ciclo di vita (start, stop, deallocate, restart) su una VM Azure, il campo `success` dell'`ExecutionResult` restituito deve essere `true` se e solo se l'API Azure ha completato l'operazione senza errori. In caso di errore, `success` deve essere `false` e il campo `error` deve contenere il messaggio di errore dell'API. + +**Valida: Requisiti 8.5, 8.6** + +## Gestione degli Errori + +### Strategia generale + +Il plugin Azure segue la strategia di degradazione graduale già adottata dagli altri plugin di Pabawi: + +1. **Errori di autenticazione**: Catturati durante `performInitialization()`. Il plugin imposta `initialized: false` e logga l'errore. Non viene lanciata alcuna eccezione verso l'`IntegrationManager`. + +2. **Errori API durante le query**: Catturati in ogni metodo pubblico (`getInventory`, `getGroups`, `getNodeFacts`, `getNodeData`). Il plugin logga l'errore e restituisce dati vuoti (array vuoto o Facts vuoto). + +3. **Errori durante le operazioni di ciclo di vita**: Catturati in `executeAction()`. Il plugin restituisce un `ExecutionResult` con `success: false` e il messaggio di errore. + +4. **Errori di configurazione**: Lanciati durante il parsing in `ConfigService` (campi obbligatori mancanti). Questi errori impediscono l'avvio dell'applicazione, coerentemente con il comportamento degli altri plugin. 
+ +### Classificazione degli errori + +| Tipo errore | Classe | Comportamento | +|---|---|---| +| Credenziali non valide/scadute | `AzureAuthenticationError` | `initialized: false`, log error | +| API non raggiungibile | `Error` (generico) | Dati vuoti, log error | +| VM non trovata | N/A | Facts vuoto, log warning | +| Permessi insufficienti | `AzureAuthenticationError` | `degraded: true`, log warning | +| Timeout API | `Error` (generico) | Dati vuoti, log error | +| Config mancante | `Error` | Throw durante startup | + +### Transizioni di stato e logging + +```mermaid +stateDiagram-v2 + [*] --> Uninitialized + Uninitialized --> Initialized: autenticazione OK + Uninitialized --> Failed: autenticazione fallita + Initialized --> Healthy: healthCheck OK + Initialized --> Unhealthy: healthCheck fallito + Healthy --> Unhealthy: healthCheck fallito [log WARNING] + Unhealthy --> Healthy: healthCheck OK [log INFO] + Healthy --> Degraded: permessi parziali [log WARNING] + Degraded --> Healthy: permessi ripristinati [log INFO] + Failed --> Initialized: retry manuale +``` + +## Strategia di Testing + +### Approccio duale + +Il testing del plugin Azure utilizza un approccio duale complementare: + +- **Test unitari (Vitest)**: Verificano esempi specifici, edge case e condizioni di errore con mock delle API Azure +- **Test property-based (fast-check + Vitest)**: Verificano proprietà universali su input generati casualmente + +### Libreria di property-based testing + +Si utilizza `fast-check` (già presente nel progetto) integrato con Vitest. Ogni test property-based deve: + +- Eseguire almeno 100 iterazioni (`numRuns: 100`) +- Referenziare la proprietà del design document tramite commento tag +- Formato tag: `Feature: azure-support, Property {numero}: {titolo}` + +### Test unitari + +I test unitari coprono: + +1. 
**Configurazione** (Requisiti 1.x): + - Parsing corretto con tutte le variabili obbligatorie + - Errore con `AZURE_TENANT_ID` mancante (messaggio specifico) + - Errore con `AZURE_SUBSCRIPTION_ID` mancante (messaggio specifico) + - Esclusione config quando `AZURE_ENABLED` non è `true` + +2. **Inizializzazione** (Requisiti 2.x): + - Registrazione come tipo `"both"` + - Autenticazione con Service Principal (mock) + - Stato `initialized: true` dopo autenticazione riuscita + - Stato `initialized: false` dopo autenticazione fallita, senza throw + +3. **Health check** (Requisiti 6.x): + - `healthy: true` con nome Subscription quando connettività OK + - `healthy: false` con messaggio errore per credenziali non valide + - `healthy: false` con messaggio connettività per API non raggiungibili + - `degraded: true` con capabilities per permessi parziali + +4. **Degradazione** (Requisiti 7.x): + - Log warning su transizione healthy→unhealthy + - Log info su transizione unhealthy→healthy + +5. **Ciclo di vita VM** (Requisiti 8.x): + - Azioni start, stop, deallocate, restart (mock API) + - `listCapabilities()` restituisce le 4 azioni + +6. 
**Dettagli VM** (Requisiti 5.x): + - `getNodeData(nodeId, "status")` restituisce stato esecuzione + - `getNodeData(nodeId, "network")` restituisce info rete + - `getNodeFacts(nodeId)` con nodeId non valido restituisce Facts vuoto + +### Test property-based + +Ogni proprietà del design document è implementata da un singolo test property-based: + +| Proprietà | Generatore | Verifica | +|---|---|---| +| 1: Round-trip config | `fc.record({tenantId: fc.uuid(), ...})` | Parsing env → oggetto config equivalente | +| 2: Config disabilitata | `fc.string().filter(s => s !== "true")` | `azure` assente dall'oggetto integrazioni | +| 3: Plugin non operativo | `fc.string()` (nodeId casuali) | Array vuoti, nessuna eccezione | +| 4: Mappatura VM→Node | `fc.record({name, resourceGroup, ...})` | `source="azure"`, `id` formato corretto, `uri` non vuoto | +| 5: Filtri inventario | `fc.array(vmArbitrary)` + filtri | Solo VM corrispondenti ai filtri | +| 6: Raggruppamento | `fc.array(vmArbitrary)` | Gruppi per RG, regione, tag completi e coerenti | +| 7: Facts completi | `fc.record({vmSize, location, ...})` | Tutti i campi richiesti presenti | +| 8: Risultato ciclo di vita | `fc.constantFrom("start","stop","deallocate","restart")` | `success` coerente con esito API | + +### Struttura dei file di test + +``` +backend/src/integrations/azure/__tests__/ +├── AzurePlugin.test.ts # Test unitari del plugin +├── AzureService.test.ts # Test unitari del servizio +├── AzurePlugin.property.test.ts # Test property-based +└── AzureConfig.test.ts # Test configurazione +``` + +### Dipendenze Azure SDK + +```json +{ + "@azure/identity": "^4.x", + "@azure/arm-compute": "^21.x", + "@azure/arm-network": "^33.x", + "@azure/arm-subscriptions": "^5.x" +} +``` + +Queste dipendenze vengono mockate nei test unitari e property-based per evitare chiamate reali alle API Azure. 
diff --git a/.kiro/specs/azure-support/requirements.md b/.kiro/specs/azure-support/requirements.md new file mode 100644 index 00000000..e244635a --- /dev/null +++ b/.kiro/specs/azure-support/requirements.md @@ -0,0 +1,146 @@ +# Documento dei Requisiti — Supporto Azure + +## Introduzione + +Questa specifica definisce i requisiti per l'integrazione di Microsoft Azure in Pabawi. L'obiettivo è aggiungere un plugin Azure che implementi l'interfaccia `InformationSourcePlugin` (e opzionalmente `ExecutionToolPlugin`) per consentire la gestione delle macchine virtuali Azure direttamente dall'interfaccia unificata di Pabawi, seguendo gli stessi pattern architetturali già utilizzati per AWS e Proxmox. + +## Glossario + +- **Azure_Plugin**: Plugin di integrazione Azure per Pabawi, che estende `BasePlugin` e implementa le interfacce `InformationSourcePlugin` e `ExecutionToolPlugin` +- **Azure_Service**: Servizio interno che incapsula le chiamate alle API Azure SDK (`@azure/arm-compute`, `@azure/identity`) +- **ConfigService**: Servizio di configurazione esistente di Pabawi che carica e valida le variabili d'ambiente +- **IntegrationManager**: Gestore centrale dei plugin che registra, inizializza e orchestra tutte le integrazioni +- **VM**: Macchina virtuale Azure (Azure Virtual Machine) +- **Service_Principal**: Identità applicativa Azure utilizzata per l'autenticazione tramite `clientId`, `clientSecret` e `tenantId` +- **Subscription**: Sottoscrizione Azure che raggruppa le risorse e la fatturazione +- **Resource_Group**: Contenitore logico Azure che raggruppa risorse correlate +- **Node**: Rappresentazione interna di Pabawi di un host gestito, definita in `integrations/bolt/types.ts` +- **NodeGroup**: Raggruppamento logico di nodi in Pabawi, definito in `integrations/types.ts` +- **HealthStatus**: Interfaccia standard di Pabawi per lo stato di salute di un plugin + +## Requisiti + +### Requisito 1: Configurazione del plugin Azure tramite variabili d'ambiente + +**User 
Story:** Come amministratore di Pabawi, voglio configurare l'integrazione Azure tramite variabili d'ambiente, in modo da poter abilitare e personalizzare il collegamento ad Azure senza modificare il codice. + +#### Criteri di Accettazione + +1. WHEN la variabile `AZURE_ENABLED` è impostata a `true`, THE ConfigService SHALL analizzare e validare le variabili d'ambiente Azure (`AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_SUBSCRIPTION_ID`) +2. WHEN la variabile `AZURE_ENABLED` è impostata a `true` e `AZURE_TENANT_ID` non è definita, THE ConfigService SHALL generare un errore con il messaggio "AZURE_TENANT_ID is required when AZURE_ENABLED is true" +3. WHEN la variabile `AZURE_ENABLED` è impostata a `true` e `AZURE_SUBSCRIPTION_ID` non è definita, THE ConfigService SHALL generare un errore con il messaggio "AZURE_SUBSCRIPTION_ID is required when AZURE_ENABLED is true" +4. THE ConfigService SHALL supportare la variabile opzionale `AZURE_RESOURCE_GROUP` per filtrare le risorse a un singolo Resource Group +5. THE ConfigService SHALL supportare la variabile opzionale `AZURE_REGION` per filtrare le VM a una specifica regione Azure +6. THE ConfigService SHALL supportare la variabile opzionale `AZURE_PRIORITY` per definire la priorità del plugin nell'aggregazione dell'inventario +7. IF la variabile `AZURE_ENABLED` non è definita o è impostata a `false`, THEN THE ConfigService SHALL escludere la configurazione Azure dall'oggetto integrazioni + +### Requisito 2: Registrazione e inizializzazione del plugin Azure + +**User Story:** Come amministratore di Pabawi, voglio che il plugin Azure si registri automaticamente nell'IntegrationManager all'avvio, in modo che sia disponibile insieme alle altre integrazioni. + +#### Criteri di Accettazione + +1. WHEN il ConfigService restituisce una configurazione Azure con `enabled: true`, THE Azure_Plugin SHALL registrarsi nell'IntegrationManager come plugin di tipo `both` +2. 
WHEN il Azure_Plugin viene inizializzato, THE Azure_Plugin SHALL autenticarsi verso Azure utilizzando le credenziali Service Principal configurate (`tenantId`, `clientId`, `clientSecret`) +3. WHEN l'autenticazione Azure ha successo, THE Azure_Plugin SHALL impostare il proprio stato interno a `initialized: true` +4. IF l'autenticazione Azure fallisce durante l'inizializzazione, THEN THE Azure_Plugin SHALL registrare l'errore nel log e impostare il proprio stato a `initialized: false` senza interrompere l'avvio di Pabawi +5. WHILE il Azure_Plugin è nello stato `initialized: false`, THE Azure_Plugin SHALL restituire array vuoti per le chiamate a `getInventory()` e `getGroups()` + +### Requisito 3: Inventario delle macchine virtuali Azure + +**User Story:** Come operatore, voglio visualizzare le macchine virtuali Azure nell'inventario unificato di Pabawi, in modo da avere una visione completa dell'infrastruttura. + +#### Criteri di Accettazione + +1. WHEN viene invocato `getInventory()`, THE Azure_Plugin SHALL restituire un array di oggetti `Node` corrispondenti alle VM Azure presenti nella Subscription configurata +2. THE Azure_Plugin SHALL mappare ogni VM Azure a un oggetto `Node` con i seguenti campi: `name` (nome della VM), `uri` (indirizzo IP privato o pubblico della VM), `source` impostato a `"azure"` +3. WHERE la variabile `AZURE_RESOURCE_GROUP` è configurata, THE Azure_Plugin SHALL filtrare l'inventario restituendo solo le VM appartenenti al Resource Group specificato +4. WHERE la variabile `AZURE_REGION` è configurata, THE Azure_Plugin SHALL filtrare l'inventario restituendo solo le VM nella regione specificata +5. WHEN una VM Azure non ha un indirizzo IP assegnato, THE Azure_Plugin SHALL impostare il campo `uri` del Node al nome della VM +6. 
IF l'API Azure restituisce un errore durante il recupero dell'inventario, THEN THE Azure_Plugin SHALL registrare l'errore nel log e restituire un array vuoto + +### Requisito 4: Raggruppamento delle macchine virtuali Azure + +**User Story:** Come operatore, voglio che le VM Azure siano organizzate in gruppi logici nell'inventario di Pabawi, in modo da poter filtrare e gestire le risorse per Resource Group, regione o tag. + +#### Criteri di Accettazione + +1. WHEN viene invocato `getGroups()`, THE Azure_Plugin SHALL restituire un array di oggetti `NodeGroup` che raggruppano le VM per Resource Group +2. THE Azure_Plugin SHALL creare un NodeGroup aggiuntivo per ogni regione Azure contenente VM, con formato id `azure:region:` +3. THE Azure_Plugin SHALL creare un NodeGroup per ogni tag Azure presente sulle VM, con formato id `azure:tag::` +4. THE Azure_Plugin SHALL impostare il campo `source` di ogni NodeGroup a `"azure"` +5. IF l'API Azure restituisce un errore durante il recupero dei gruppi, THEN THE Azure_Plugin SHALL registrare l'errore nel log e restituire un array vuoto + +### Requisito 5: Dettagli e facts delle macchine virtuali Azure + +**User Story:** Come operatore, voglio consultare i dettagli di una specifica VM Azure, in modo da conoscerne la configurazione e lo stato corrente. + +#### Criteri di Accettazione + +1. WHEN viene invocato `getNodeFacts(nodeId)` con un nodeId valido, THE Azure_Plugin SHALL restituire un oggetto `Facts` contenente le proprietà della VM Azure: `vmSize`, `location`, `provisioningState`, `powerState`, `osType`, `osDiskSizeGB`, `privateIpAddress`, `publicIpAddress`, `resourceGroup`, `subscriptionId`, `tags` +2. WHEN viene invocato `getNodeFacts(nodeId)` con un nodeId che non corrisponde a nessuna VM Azure, THE Azure_Plugin SHALL restituire un oggetto `Facts` vuoto +3. IF l'API Azure restituisce un errore durante il recupero dei facts, THEN THE Azure_Plugin SHALL registrare l'errore nel log e restituire un oggetto `Facts` vuoto +4. 
WHEN viene invocato `getNodeData(nodeId, "status")`, THE Azure_Plugin SHALL restituire lo stato di esecuzione della VM (running, stopped, deallocated) +5. WHEN viene invocato `getNodeData(nodeId, "network")`, THE Azure_Plugin SHALL restituire le informazioni di rete della VM (interfacce di rete, IP privati, IP pubblici, security groups associati) + +### Requisito 6: Health check del plugin Azure + +**User Story:** Come amministratore di Pabawi, voglio monitorare lo stato di connessione del plugin Azure, in modo da identificare rapidamente problemi di autenticazione o connettività. + +#### Criteri di Accettazione + +1. WHEN viene invocato `healthCheck()`, THE Azure_Plugin SHALL verificare la connettività verso le API Azure tentando di elencare le sottoscrizioni accessibili +2. WHEN la verifica di connettività ha successo, THE Azure_Plugin SHALL restituire un oggetto `HealthStatus` con `healthy: true` e un messaggio che include il nome della Subscription +3. IF le credenziali Azure sono scadute o non valide, THEN THE Azure_Plugin SHALL restituire un oggetto `HealthStatus` con `healthy: false` e un messaggio descrittivo dell'errore di autenticazione +4. IF le API Azure non sono raggiungibili, THEN THE Azure_Plugin SHALL restituire un oggetto `HealthStatus` con `healthy: false` e un messaggio che indica il problema di connettività +5. WHEN il plugin ha accesso parziale alle risorse (permessi IAM insufficienti per alcune operazioni), THE Azure_Plugin SHALL restituire un oggetto `HealthStatus` con `degraded: true`, elencando le capability funzionanti in `workingCapabilities` e quelle non funzionanti in `failingCapabilities` + +### Requisito 7: Degradazione graduale del plugin Azure + +**User Story:** Come operatore, voglio che Pabawi continui a funzionare correttamente anche quando l'integrazione Azure non è disponibile, in modo da non perdere l'accesso alle altre integrazioni. + +#### Criteri di Accettazione + +1. 
IF il plugin Azure non riesce a inizializzarsi, THEN THE IntegrationManager SHALL continuare l'inizializzazione degli altri plugin senza interruzioni +2. WHILE il plugin Azure è nello stato `healthy: false`, THE Azure_Plugin SHALL restituire dati vuoti per tutte le query di inventario senza generare eccezioni non gestite +3. WHEN il plugin Azure passa dallo stato `healthy: true` a `healthy: false`, THE Azure_Plugin SHALL registrare un messaggio di warning nel log con i dettagli dell'errore +4. WHEN il plugin Azure passa dallo stato `healthy: false` a `healthy: true`, THE Azure_Plugin SHALL registrare un messaggio informativo nel log indicando il ripristino della connessione + +### Requisito 8: Gestione del ciclo di vita delle VM Azure + +**User Story:** Come operatore, voglio poter avviare, arrestare e deallocare le VM Azure direttamente da Pabawi, in modo da gestire le risorse cloud dall'interfaccia unificata. + +#### Criteri di Accettazione + +1. WHEN viene invocato `executeAction()` con tipo `command` e azione `start`, THE Azure_Plugin SHALL avviare la VM Azure specificata nel target +2. WHEN viene invocato `executeAction()` con tipo `command` e azione `stop`, THE Azure_Plugin SHALL arrestare la VM Azure specificata nel target +3. WHEN viene invocato `executeAction()` con tipo `command` e azione `deallocate`, THE Azure_Plugin SHALL deallocare la VM Azure specificata nel target +4. WHEN viene invocato `executeAction()` con tipo `command` e azione `restart`, THE Azure_Plugin SHALL riavviare la VM Azure specificata nel target +5. WHEN un'operazione di ciclo di vita viene completata con successo, THE Azure_Plugin SHALL restituire un oggetto `ExecutionResult` con `success: true` e i dettagli dell'operazione +6. IF un'operazione di ciclo di vita fallisce, THEN THE Azure_Plugin SHALL restituire un oggetto `ExecutionResult` con `success: false` e il messaggio di errore dell'API Azure +7. 
THE Azure_Plugin SHALL esporre le operazioni di ciclo di vita tramite `listCapabilities()` con i parametri richiesti per ogni azione + +### Requisito 9: Integrazione nell'inventario aggregato + +**User Story:** Come operatore, voglio che le VM Azure appaiano nell'inventario aggregato di Pabawi insieme ai nodi delle altre integrazioni, in modo da avere una visione unificata dell'infrastruttura. + +#### Criteri di Accettazione + +1. THE IntegrationManager SHALL includere i nodi restituiti dal Azure_Plugin nell'inventario aggregato quando il plugin è abilitato e inizializzato +2. WHEN una VM Azure ha lo stesso hostname di un nodo proveniente da un'altra integrazione (Bolt, PuppetDB, Ansible, SSH, AWS, Proxmox), THE IntegrationManager SHALL collegare i nodi tramite il NodeLinkingService +3. THE Azure_Plugin SHALL impostare il campo `source` a `"azure"` per tutti i nodi e gruppi restituiti, consentendo al NodeLinkingService di identificare la provenienza dei dati +4. THE Azure_Plugin SHALL rispettare la configurazione `priority` per determinare l'ordine di precedenza nell'aggregazione dell'inventario + +### Requisito 10: Test unitari del plugin Azure + +**User Story:** Come sviluppatore, voglio che il plugin Azure sia coperto da test unitari, in modo da garantire la correttezza dell'implementazione e prevenire regressioni. + +#### Criteri di Accettazione + +1. THE Azure_Plugin SHALL avere test unitari che verifichino l'inizializzazione corretta con credenziali valide +2. THE Azure_Plugin SHALL avere test unitari che verifichino il comportamento con credenziali mancanti o non valide +3. THE Azure_Plugin SHALL avere test unitari che verifichino la mappatura corretta delle VM Azure in oggetti `Node` +4. THE Azure_Plugin SHALL avere test unitari che verifichino la creazione corretta dei `NodeGroup` per Resource Group, regione e tag +5. THE Azure_Plugin SHALL avere test unitari che verifichino il comportamento di degradazione graduale in caso di errori API +6. 
THE Azure_Plugin SHALL avere test unitari che verifichino le operazioni di ciclo di vita delle VM (start, stop, deallocate, restart) +7. FOR ALL le VM Azure restituite da `getInventory()`, la mappatura a `Node` e la successiva lettura tramite `getNodeFacts()` SHALL restituire dati coerenti con la VM originale (proprietà round-trip) diff --git a/.kiro/specs/missing-lifecycle-actions/.config.kiro b/.kiro/specs/missing-lifecycle-actions/.config.kiro new file mode 100644 index 00000000..1ebcf3e8 --- /dev/null +++ b/.kiro/specs/missing-lifecycle-actions/.config.kiro @@ -0,0 +1 @@ +{"specId": "4f667cdf-cf1b-4d2d-bb13-6f71b4887467", "workflowType": "requirements-first", "specType": "bugfix"} diff --git a/.kiro/specs/missing-lifecycle-actions/bugfix.md b/.kiro/specs/missing-lifecycle-actions/bugfix.md new file mode 100644 index 00000000..ae7b5986 --- /dev/null +++ b/.kiro/specs/missing-lifecycle-actions/bugfix.md @@ -0,0 +1,86 @@ +# Bugfix Requirements Document + +## Introduction + +On the "Manage" tab of a node detail page, users only see the "Destroy" action button. All other lifecycle actions (start, stop, shutdown, reboot, suspend, resume, snapshot) are missing. This effectively prevents users from managing the lifecycle of their Proxmox VMs and containers through the UI. + +The root cause is a broken status resolution chain in `NodeDetailPage.svelte`. The `ManageTab` component filters available actions based on `currentStatus`, but the status value passed to it resolves to `'unknown'` because the frontend `Node` interface does not declare `status` or `sourceData` fields. The property access chain `proxmoxMetadata?.status || proxmoxData?.status || currentStatus` silently fails, and since `'unknown'` only appears in the `destroy` action's availability list, only "Destroy" is shown. 
+ +Additionally, `ManageTab` performs no case normalization on the status string before comparing it against the `actionAvailability` map, which expects lowercase values (`'running'`, `'stopped'`, `'suspended'`). If a status value arrives in a different case, it would also fail to match. + +## Bug Analysis + +### Current Behavior (Defect) + +1.1 WHEN a Proxmox node has a valid status (e.g., `running`, `stopped`, `suspended`) AND the user navigates to the Manage tab THEN the system resolves `currentStatus` to `'unknown'` because the `sourceData.proxmox` property path does not resolve correctly from the API response, causing the status fallback chain to terminate at the default `'unknown'` value. + +1.2 WHEN `currentStatus` is `'unknown'` THEN the system filters out all lifecycle actions except `destroy`, because `'unknown'` is only present in the `destroy` entry of the `actionAvailability` map. + +1.3 WHEN the backend returns a status value in an unexpected case (e.g., `'Running'` instead of `'running'`) THEN the system fails to match it against the lowercase keys in `actionAvailability`, resulting in no actions being displayed for that status. + +### Expected Behavior (Correct) + +2.1 WHEN a Proxmox node has a valid status (`running`, `stopped`, `suspended`, or `paused`) AND the user navigates to the Manage tab THEN the system SHALL correctly resolve the node's status from the API response data and pass it to the `ManageTab` component. + +2.2 WHEN `currentStatus` is a recognized Proxmox status (`running`, `stopped`, `suspended`, `paused`) THEN the system SHALL display all lifecycle actions whose `actionAvailability` entry includes that status (e.g., for `running`: stop, shutdown, reboot, suspend, snapshot, destroy). + +2.3 WHEN the backend returns a status value in any case variation THEN the system SHALL normalize the status to lowercase before comparing against the `actionAvailability` map. 
+ +### Unchanged Behavior (Regression Prevention) + +3.1 WHEN `currentStatus` is genuinely `'unknown'` (no Proxmox status data available) THEN the system SHALL CONTINUE TO show only the `destroy` action, as this is the correct fallback behavior. + +3.2 WHEN a destructive action (destroy) is selected THEN the system SHALL CONTINUE TO display a confirmation dialog before executing the action. + +3.3 WHEN an action is executed successfully THEN the system SHALL CONTINUE TO call `onStatusChange` to refresh the node data and update the displayed actions accordingly. + +3.4 WHEN the node is not a Proxmox node (no `sourceData.proxmox`) THEN the system SHALL CONTINUE TO fall back to the node's top-level status or `'unknown'`. + +--- + +### Bug Condition + +```pascal +FUNCTION isBugCondition(X) + INPUT: X of type NodeDetailPageInput (node data from API response) + OUTPUT: boolean + + // The bug triggers when the node has a valid Proxmox status but the + // frontend status resolution chain fails to extract it, resulting in 'unknown' + LET apiStatus = X.sourceData?.proxmox?.status + OR X.sourceData?.proxmox?.metadata?.status + OR X.status + LET resolvedStatus = frontendResolveStatus(X) + + RETURN apiStatus IN {'running', 'stopped', 'suspended', 'paused'} + AND resolvedStatus = 'unknown' +END FUNCTION +``` + +### Fix Checking Property + +```pascal +// Property: Fix Checking - Status Resolution +FOR ALL X WHERE isBugCondition(X) DO + resolvedStatus ← resolveStatus'(X) + ASSERT resolvedStatus IN {'running', 'stopped', 'suspended', 'paused'} + ASSERT resolvedStatus = normalizeCase(X.actualProxmoxStatus) +END FOR + +// Property: Fix Checking - Action Visibility +FOR ALL X WHERE isBugCondition(X) DO + actions ← displayableActions'(X) + ASSERT |actions| > 1 + ASSERT actions CONTAINS relevant actions for resolvedStatus +END FOR +``` + +### Preservation Checking Property + +```pascal +// Property: Preservation Checking +FOR ALL X WHERE NOT isBugCondition(X) DO + ASSERT 
resolveStatus(X) = resolveStatus'(X) + ASSERT displayableActions(X) = displayableActions'(X) +END FOR +``` diff --git a/.kiro/specs/pabawi-release-1-0-0/.config.kiro b/.kiro/specs/pabawi-release-1-0-0/.config.kiro new file mode 100644 index 00000000..2357e983 --- /dev/null +++ b/.kiro/specs/pabawi-release-1-0-0/.config.kiro @@ -0,0 +1 @@ +{"specId": "7180cfda-5731-4cef-b7ea-4f96d9dd8b2c", "workflowType": "design-first", "specType": "feature"} diff --git a/.kiro/specs/pabawi-release-1-0-0/design.md b/.kiro/specs/pabawi-release-1-0-0/design.md new file mode 100644 index 00000000..2bd24006 --- /dev/null +++ b/.kiro/specs/pabawi-release-1-0-0/design.md @@ -0,0 +1,1180 @@ +# Design Document: Pabawi Release 1.0.0 + +## Overview + +Pabawi 1.0.0 is a major release introducing six foundational feature areas that transform the application from a single-database, environment-variable-configured tool into a multi-database, plugin-extensible, journal-aware infrastructure management platform. The release covers: (1) full database abstraction to support PostgreSQL alongside SQLite, (2) an AWS plugin with EC2 focus following the existing plugin architecture, (3) improved Proxmox VM/container provisioning UX with clearer separation in the UI while keeping a single plugin, (4) local database storage for integration configurations per user, (5) a nodes journal for tracking provisioning events, lifecycle actions, and manual notes, and (6) improved RBAC with finer-grained permissions per integration activity. + +These features are deeply interconnected. The database abstraction layer is foundational — it must be in place before integration configs can be stored in DB, before the journal can persist events, and before new RBAC permissions can be seeded. The AWS plugin and Proxmox improvements extend the plugin architecture. The journal consumes events from all integrations. The improved RBAC gates access to all new and existing features. 
+ +Each cloud/hypervisor integration is modeled as a single plugin that handles its own compute types internally. Proxmox remains one plugin managing both VMs and containers (with better internal separation and UI clarity). AWS starts as one plugin focused on EC2, with room to grow into container services (ECS/Fargate) later. This avoids plugin proliferation and keeps the integration model consistent. + +The stack remains Svelte 5 + Vite frontend, Node.js + TypeScript + Express backend. TypeScript is used throughout for all code examples and interfaces. + +## Architecture + +### Current Architecture + +```mermaid +graph TD + FE[Svelte 5 Frontend] -->|REST API| BE[Express Backend] + BE --> DBS[DatabaseService] + DBS --> SQLite[(SQLite via sqlite3)] + BE --> CS[ConfigService] + CS --> ENV[.env File] + BE --> IM[IntegrationManager] + IM --> Bolt[BoltPlugin] + IM --> Ansible[AnsiblePlugin] + IM --> PDB[PuppetDBService] + IM --> PS[PuppetserverPlugin] + IM --> Hiera[HieraPlugin] + IM --> Proxmox[ProxmoxIntegration] + IM --> SSH[SSHPlugin] + BE --> RBAC[PermissionService] + RBAC --> SQLite +``` + +### Target Architecture (1.0.0) + +```mermaid +graph TD + FE[Svelte 5 Frontend] -->|REST API| BE[Express Backend] + BE --> DAL[DatabaseAdapter] + DAL --> SQLiteA[SQLiteAdapter] + DAL --> PgA[PostgresAdapter] + SQLiteA --> SQLite[(SQLite)] + PgA --> PG[(PostgreSQL)] + BE --> CS2[ConfigService v2] + CS2 --> ENV2[.env File] + CS2 --> DAL + BE --> IM2[IntegrationManager] + IM2 --> Bolt2[BoltPlugin] + IM2 --> Ansible2[AnsiblePlugin] + IM2 --> PDB2[PuppetDBService] + IM2 --> PS2[PuppetserverPlugin] + IM2 --> Hiera2[HieraPlugin] + IM2 --> Proxmox2[ProxmoxIntegration v2] + IM2 --> SSH2[SSHPlugin] + IM2 --> AWS[AWSPlugin] + BE --> RBAC2[PermissionService v2] + RBAC2 --> DAL + BE --> Journal[JournalService] + Journal --> DAL + BE --> ICS[IntegrationConfigService] + ICS --> DAL +``` + +## Sequence Diagrams + +### Feature 1: Database Abstraction — Query Flow + +```mermaid +sequenceDiagram 
+ participant S as Service (e.g. UserService) + participant DA as DatabaseAdapter + participant SA as SQLiteAdapter + participant PA as PostgresAdapter + participant DB as Database + + S->>DA: query(sql, params) + alt DB_TYPE=sqlite + DA->>SA: query(sql, params) + SA->>DB: sqlite3.all(sql, params) + DB-->>SA: rows + SA-->>DA: rows + else DB_TYPE=postgres + DA->>PA: query(sql, params) + PA->>DB: pool.query(sql, params) + DB-->>PA: rows + PA-->>DA: rows + end + DA-->>S: rows +``` + +### Feature 2: AWS Plugin — EC2 Provisioning Flow + +```mermaid +sequenceDiagram + participant UI as Frontend + participant API as Express API + participant IM as IntegrationManager + participant AWS as AWSPlugin + participant EC2 as AWS EC2 SDK + participant J as JournalService + + UI->>API: POST /api/integrations/aws/provision + API->>IM: executeAction("aws", action) + IM->>AWS: executeAction(action) + AWS->>EC2: runInstances(params) + EC2-->>AWS: instanceId, status + AWS->>J: recordEvent(provision, instanceId) + AWS-->>IM: ExecutionResult + IM-->>API: result + API-->>UI: 200 OK {instanceId} +``` + +### Feature 3: Proxmox VM/Container Provisioning (Single Plugin) + +```mermaid +sequenceDiagram + participant UI as Frontend + participant API as Express API + participant IM as IntegrationManager + participant PX as ProxmoxIntegration + participant PVE as Proxmox API + + UI->>API: POST /api/integrations/proxmox/provision {type: "vm"} + API->>IM: executeAction("proxmox", action) + IM->>PX: executeAction(action) + PX->>PX: route by action.metadata.computeType + alt computeType = "vm" + PX->>PVE: POST /nodes/{node}/qemu + else computeType = "lxc" + PX->>PVE: POST /nodes/{node}/lxc + end + PVE-->>PX: UPID + PX-->>IM: ExecutionResult + IM-->>API: result + API-->>UI: 200 OK +``` + +### Feature 4: Integration Config Storage Flow + +```mermaid +sequenceDiagram + participant UI as Setup Page + participant API as Express API + participant ICS as IntegrationConfigService + participant DA as 
DatabaseAdapter + participant CS as ConfigService + + UI->>API: PUT /api/config/integrations/proxmox + API->>ICS: saveConfig(userId, "proxmox", config) + ICS->>ICS: encrypt(sensitiveFields) + ICS->>DA: upsert integration_configs + DA-->>ICS: ok + ICS-->>API: saved + API-->>UI: 200 OK + + Note over CS,ICS: On startup, ConfigService merges .env + DB configs + CS->>ICS: getActiveConfigs() + ICS->>DA: SELECT * FROM integration_configs + DA-->>ICS: rows + ICS-->>CS: configs (decrypted) + CS->>CS: merge with .env (DB overrides .env) +``` + +### Feature 5: Nodes Journal — Event Recording + +```mermaid +sequenceDiagram + participant Plugin as Any Plugin + participant J as JournalService + participant DA as DatabaseAdapter + participant PDB as PuppetDB (live) + + Plugin->>J: recordEvent({nodeId, type, source, data}) + J->>DA: INSERT INTO journal_entries + DA-->>J: ok + + Note over J,PDB: Journal also aggregates live sources + J->>PDB: getNodeEvents(nodeId) + PDB-->>J: puppet events[] + J->>DA: SELECT * FROM journal_entries WHERE nodeId=? + DA-->>J: db events[] + J->>J: merge + sort by timestamp + J-->>Plugin: unified timeline[] +``` + +### Feature 6: Improved RBAC — Permission Check + +```mermaid +sequenceDiagram + participant MW as Auth Middleware + participant PS as PermissionService + participant DA as DatabaseAdapter + + MW->>PS: hasPermission(userId, "proxmox", "provision") + PS->>PS: check cache + alt cache hit + PS-->>MW: true/false + else cache miss + PS->>DA: query user→roles→permissions + DA-->>PS: permissions[] + PS->>PS: update cache + PS-->>MW: true/false + end +``` + +## Components and Interfaces + +### Component 1: DatabaseAdapter (Feature 1) + +**Purpose**: Abstract database operations behind a common interface so services are database-agnostic. 
+

```typescript
interface DatabaseAdapter {
  // Core query operations
  query<T = unknown>(sql: string, params?: unknown[]): Promise<T[]>;
  queryOne<T = unknown>(sql: string, params?: unknown[]): Promise<T | undefined>;
  execute(sql: string, params?: unknown[]): Promise<{ changes: number }>;

  // Transaction support
  beginTransaction(): Promise<void>;
  commit(): Promise<void>;
  rollback(): Promise<void>;
  withTransaction<T>(fn: () => Promise<T>): Promise<T>;

  // Connection lifecycle
  initialize(): Promise<void>;
  close(): Promise<void>;
  isConnected(): boolean;

  // Migration support
  runMigrations(): Promise<void>;
  getMigrationStatus(): Promise<{ applied: MigrationInfo[]; pending: MigrationInfo[] }>;

  // Dialect helpers
  getDialect(): "sqlite" | "postgres";
  getPlaceholder(index: number): string; // Returns '?' for SQLite, '$N' for Postgres
}
```

**Responsibilities**:

- Provide a unified query interface for all services
- Handle parameter placeholder differences (`?` vs `$1`)
- Manage connection pooling (Postgres) or single connection (SQLite)
- Run dialect-aware migrations
- Provide transaction support with automatic rollback on error

### Component 2: AWSPlugin (Feature 2)

**Purpose**: Integrate AWS EC2 into Pabawi following the existing plugin architecture. 
+

```typescript
class AWSPlugin extends BasePlugin implements ExecutionToolPlugin, InformationSourcePlugin {
  readonly type = "both" as const;

  // InformationSourcePlugin
  getInventory(): Promise<Node[]>; // List EC2 instances as nodes
  getGroups(): Promise<NodeGroup[]>; // Group by region, VPC, tags
  getNodeFacts(nodeId: string): Promise<NodeFacts>; // Instance metadata as facts
  getNodeData(nodeId: string, dataType: string): Promise<unknown>;

  // ExecutionToolPlugin
  executeAction(action: Action): Promise<ExecutionResult>;
  listCapabilities(): Capability[];
  listProvisioningCapabilities(): ProvisioningCapability[];

  // AWS-specific
  getRegions(): Promise<string[]>;
  getInstanceTypes(region?: string): Promise<InstanceTypeInfo[]>;
  getAMIs(region: string, filters?: AMIFilter[]): Promise<AMIInfo[]>;
  getVPCs(region: string): Promise<VPCInfo[]>;
  getSubnets(region: string, vpcId?: string): Promise<SubnetInfo[]>;
  getSecurityGroups(region: string, vpcId?: string): Promise<SecurityGroupInfo[]>;
  getKeyPairs(region: string): Promise<KeyPairInfo[]>;
}
```

**Responsibilities**:

- Discover EC2 instances and present them as inventory nodes
- Execute lifecycle actions (start, stop, reboot, terminate)
- Provision new EC2 instances with configurable parameters
- Group instances by region, VPC, security group, and tags
- Provide instance metadata as node facts

### Component 3: ProxmoxIntegration v2 (Feature 3)

**Purpose**: Enhance the existing ProxmoxIntegration with clearer internal separation between VM and container operations, and improved provisioning UX. Remains a single plugin registered with IntegrationManager. 
+

```typescript
class ProxmoxIntegration extends BasePlugin implements ExecutionToolPlugin, InformationSourcePlugin {
  readonly type = "both" as const;
  private service: ProxmoxService;

  // Existing interface (unchanged)
  getInventory(): Promise<Node[]>; // Returns both VMs and containers
  getGroups(): Promise<NodeGroup[]>; // Groups by node, status, type (vm/lxc)
  getNodeFacts(nodeId: string): Promise<NodeFacts>;
  executeAction(action: Action): Promise<ExecutionResult>;
  listCapabilities(): Capability[];
  listProvisioningCapabilities(): ProvisioningCapability[];

  // Enhanced: compute-type-aware provisioning helpers
  getNodes(): Promise<ProxmoxNode[]>;
  getNextVMID(): Promise<number>;
  getISOImages(node: string, storage?: string): Promise<ISOImage[]>;
  getTemplates(node: string, storage?: string): Promise<Template[]>;
  getStorages(node: string, contentType?: string): Promise<Storage[]>;
  getNetworkBridges(node: string, type?: string): Promise<NetworkBridge[]>;
}

// ProxmoxService enhanced with explicit compute type routing
class ProxmoxService {
  // Provisioning with explicit compute type
  createVM(node: string, params: VMCreateParams): Promise<string>;
  createLXC(node: string, params: LXCCreateParams): Promise<string>;

  // Inventory with type filtering
  getInventory(computeType?: "qemu" | "lxc"): Promise<ProxmoxGuest[]>;

  // Lifecycle actions route internally by guest type
  executeLifecycleAction(node: string, vmid: number, action: string): Promise<string>;
}
```

**Responsibilities**:

- Single plugin registration with IntegrationManager (name: "proxmox")
- Internal routing between VM and container operations based on action metadata or guest type
- Separate provisioning forms in UI for VMs vs containers, but same backend plugin
- Inventory returns all guests with a `computeType` field ("vm" | "lxc") for UI filtering
- Groups include type-based groups (e.g., "Proxmox VMs", "Proxmox Containers")
- Shared authentication, connection pooling, and health checks

### Component 4: IntegrationConfigService (Feature 4)

**Purpose**: Store and retrieve integration configurations in 
the database, per user.

```typescript
class IntegrationConfigService {
  saveConfig(userId: string, integrationName: string, config: Record<string, unknown>): Promise<void>;
  getConfig(userId: string, integrationName: string): Promise<IntegrationConfigRecord | null>;
  getActiveConfigs(): Promise<IntegrationConfigRecord[]>;
  deleteConfig(userId: string, integrationName: string): Promise<void>;
  listConfigs(userId: string): Promise<IntegrationConfigRecord[]>;
  getEffectiveConfig(integrationName: string): Promise<Record<string, unknown>>;
}

interface IntegrationConfigRecord {
  id: string;
  userId: string;
  integrationName: string;
  config: Record<string, unknown>; // Sensitive fields encrypted at rest
  isActive: boolean;
  createdAt: string;
  updatedAt: string;
}
```

**Responsibilities**:

- CRUD operations for integration configs stored in DB
- Encrypt sensitive fields (tokens, passwords, keys) at rest
- Merge DB configs with .env configs (DB takes precedence when active)
- Per-user config ownership with admin override capability
- Validate configs against integration-specific Zod schemas

### Component 5: JournalService (Feature 5)

**Purpose**: Record and retrieve a unified timeline of events for inventory nodes. 
+

```typescript
class JournalService {
  // Write operations
  recordEvent(entry: CreateJournalEntry): Promise<JournalEntry>;
  addNote(nodeId: string, userId: string, content: string): Promise<JournalEntry>;

  // Read operations
  getNodeTimeline(nodeId: string, options?: TimelineOptions): Promise<JournalEntry[]>;
  getRecentEvents(options?: RecentEventsOptions): Promise<JournalEntry[]>;
  searchEntries(query: string, options?: SearchOptions): Promise<JournalEntry[]>;

  // Live source aggregation
  aggregateTimeline(nodeId: string, options?: TimelineOptions): Promise<JournalEntry[]>;
}

interface JournalEntry {
  id: string;
  nodeId: string;
  nodeUri: string;
  eventType: JournalEventType;
  source: JournalSource;
  action: string;
  summary: string;
  details: Record<string, unknown>;
  userId?: string;
  timestamp: string;
  isLive: boolean; // true = fetched from live source, false = stored in DB
}

type JournalEventType =
  | "provision" | "destroy" | "start" | "stop" | "reboot" | "suspend" | "resume"
  | "command_execution" | "task_execution" | "puppet_run" | "package_install"
  | "config_change" | "note" | "error" | "warning" | "info";

type JournalSource = "proxmox" | "aws" | "bolt" | "ansible"
  | "ssh" | "puppetdb" | "user" | "system";
```

**Responsibilities**:

- Persist provisioning events, lifecycle actions, execution results to DB
- Aggregate live events from PuppetDB (reports, events) with stored events
- Support manual user notes attached to nodes
- Provide filtered, paginated timeline views
- Emit events that other services can subscribe to

### Component 6: Improved RBAC (Feature 6)

**Purpose**: Extend the permission model with finer-grained, integration-activity-level permissions. 
+ +```typescript +// New permission granularity model +// Current: resource:action (e.g., "proxmox:execute") +// New: resource:sub_resource:action (e.g., "proxmox_vm:provision:execute") + +// New permission resources for 1.0.0 +type PermissionResource = + // Proxmox (single plugin, granular actions) + | "proxmox" // Proxmox operations (VMs and containers) + // AWS + | "aws" // AWS operations (EC2 initially, extensible) + // Journal + | "journal" // Journal read/write + // Integration config + | "integration_config" // Config management + // Existing (unchanged) + | "ansible" | "bolt" | "puppetdb" | "users" | "groups" | "roles"; + +// New fine-grained actions +type PermissionAction = + | "read" | "write" | "execute" | "admin" + // New granular actions + | "provision" // Create new resources + | "destroy" // Destroy/decommission resources + | "lifecycle" // Start/stop/reboot/suspend/resume + | "configure" // Modify integration settings + | "note" // Add journal notes + | "export"; // Export data + +// New built-in roles +interface BuiltInRoles { + Viewer: Permission[]; // read on all resources + Operator: Permission[]; // read + execute + lifecycle on all + Provisioner: Permission[]; // NEW: read + provision + destroy + lifecycle on infra + Administrator: Permission[]; // all permissions +} +``` + +**Responsibilities**: + +- Extend permissions table with new resource:action pairs +- Add new built-in "Provisioner" role +- Seed new permissions via migration +- Update PermissionService to handle new granularity +- Update auth middleware to check new permission types +- Provide UI for managing granular role-permission assignments + +## Data Models + +### Database Abstraction Tables (Feature 1) + +No new tables — this feature changes how existing tables are accessed. The `DatabaseAdapter` interface replaces direct `sqlite3.Database` usage. 
+ +**Migration Strategy**: + +- All existing `.sql` migrations remain as-is (SQLite dialect) +- New migration files are created in pairs: `NNN_name.sqlite.sql` and `NNN_name.postgres.sql` +- MigrationRunner selects the correct dialect file based on `DatabaseAdapter.getDialect()` +- For shared SQL (no dialect differences), a single `NNN_name.sql` file works for both + +### Integration Configs Table (Feature 4) + +```sql +CREATE TABLE integration_configs ( + id TEXT PRIMARY KEY, + userId TEXT NOT NULL, + integrationName TEXT NOT NULL, + config TEXT NOT NULL, -- JSON, sensitive fields encrypted + isActive INTEGER NOT NULL DEFAULT 1, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL, + FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, + UNIQUE(userId, integrationName) +); + +CREATE INDEX idx_integration_configs_user ON integration_configs(userId); +CREATE INDEX idx_integration_configs_name ON integration_configs(integrationName); +CREATE INDEX idx_integration_configs_active ON integration_configs(isActive); +``` + +**Validation Rules**: + +- `integrationName` must match a registered plugin name +- `config` JSON must validate against the integration's Zod schema +- Sensitive fields (matching patterns: `*token*`, `*password*`, `*secret*`, `*key*`) are encrypted with AES-256-GCM before storage +- One active config per integration per user (UNIQUE constraint) + +### Journal Entries Table (Feature 5) + +```sql +CREATE TABLE journal_entries ( + id TEXT PRIMARY KEY, + nodeId TEXT NOT NULL, + nodeUri TEXT NOT NULL, + eventType TEXT NOT NULL, + source TEXT NOT NULL, + action TEXT NOT NULL, + summary TEXT NOT NULL, + details TEXT, -- JSON + userId TEXT, + timestamp TEXT NOT NULL, + FOREIGN KEY (userId) REFERENCES users(id) ON DELETE SET NULL +); + +CREATE INDEX idx_journal_node ON journal_entries(nodeId); +CREATE INDEX idx_journal_timestamp ON journal_entries(timestamp DESC); +CREATE INDEX idx_journal_type ON journal_entries(eventType); +CREATE INDEX 
idx_journal_source ON journal_entries(source); +CREATE INDEX idx_journal_node_time ON journal_entries(nodeId, timestamp DESC); +``` + +**Validation Rules**: + +- `eventType` must be one of the defined `JournalEventType` values +- `source` must be one of the defined `JournalSource` values +- `timestamp` must be ISO 8601 format +- `details` must be valid JSON if provided +- `nodeUri` follows the existing format: `{source}:{identifier}` + +### New Permissions Seed Data (Feature 6) + +```sql +-- Proxmox permissions (enhanced granularity, single plugin) +INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES + ('proxmox-read-001', 'proxmox', 'read', 'View Proxmox VMs and containers', datetime('now')), + ('proxmox-lifecycle-001', 'proxmox', 'lifecycle', 'Start/stop/reboot VMs and containers', datetime('now')), + ('proxmox-provision-001', 'proxmox', 'provision', 'Create new VMs and containers', datetime('now')), + ('proxmox-destroy-001', 'proxmox', 'destroy', 'Destroy/decommission VMs and containers', datetime('now')), + ('proxmox-admin-001', 'proxmox', 'admin', 'Full Proxmox management', datetime('now')); + +-- AWS permissions (single plugin, EC2 initially) +INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES + ('aws-read-001', 'aws', 'read', 'View AWS resources', datetime('now')), + ('aws-lifecycle-001', 'aws', 'lifecycle', 'Start/stop/reboot AWS instances', datetime('now')), + ('aws-provision-001', 'aws', 'provision', 'Launch new AWS resources', datetime('now')), + ('aws-destroy-001', 'aws', 'destroy', 'Terminate AWS resources', datetime('now')), + ('aws-admin-001', 'aws', 'admin', 'Full AWS management', datetime('now')); + +-- Journal permissions +INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES + ('journal-read-001', 'journal', 'read', 'View journal entries', datetime('now')), + ('journal-note-001', 'journal', 'note', 'Add manual notes', datetime('now')), + ('journal-admin-001', 
'journal', 'admin', 'Manage journal entries', datetime('now')); + +-- Integration config permissions +INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES + ('integration_config-read-001', 'integration_config', 'read', 'View integration configs', datetime('now')), + ('integration_config-configure-001', 'integration_config', 'configure', 'Modify integration configs', datetime('now')), + ('integration_config-admin-001', 'integration_config', 'admin', 'Full config management', datetime('now')); + +-- New Provisioner role +INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES + ('role-provisioner-001', 'Provisioner', 'Provision and manage infrastructure resources', 1, datetime('now'), datetime('now')); +``` + +## Key Functions with Formal Specifications + +### Function 1: DatabaseAdapter.query() + +```typescript +async query(sql: string, params?: unknown[]): Promise +``` + +**Preconditions:** + +- `sql` is a non-empty string containing valid SQL for the active dialect +- `params` array length matches the number of placeholders in `sql` +- Adapter is initialized (`isConnected() === true`) + +**Postconditions:** + +- Returns array of typed rows (may be empty) +- No mutations to input parameters +- Connection remains valid after call +- If SQL is invalid, throws `DatabaseQueryError` with dialect-specific message + +**Loop Invariants:** N/A + +### Function 2: DatabaseAdapter.withTransaction() + +```typescript +async withTransaction(fn: () => Promise): Promise +``` + +**Preconditions:** + +- Adapter is initialized and connected +- No nested transaction is already active (SQLite limitation) + +**Postconditions:** + +- If `fn` resolves: transaction is committed, returns `fn` result +- If `fn` rejects: transaction is rolled back, throws original error +- Database state is consistent regardless of outcome + +**Loop Invariants:** N/A + +### Function 3: IntegrationConfigService.getEffectiveConfig() + +```typescript +async 
getEffectiveConfig(integrationName: string): Promise<Record<string, unknown>>
```

**Preconditions:**

- `integrationName` is a non-empty string matching a registered plugin
- DatabaseAdapter is initialized

**Postconditions:**

- Returns merged config: `.env` values as base, DB values override when `isActive === true`
- Sensitive fields in returned config are decrypted
- If no DB config exists, returns `.env` config only
- If no `.env` config exists, returns DB config only
- If neither exists, returns empty object

**Loop Invariants:** N/A

### Function 4: JournalService.aggregateTimeline()

```typescript
async aggregateTimeline(nodeId: string, options?: TimelineOptions): Promise<JournalEntry[]>
```

**Preconditions:**

- `nodeId` is a non-empty string
- If `options.startDate` and `options.endDate` provided, `startDate <= endDate`

**Postconditions:**

- Returns merged array of DB-stored events and live-source events
- All entries have `isLive` correctly set (`true` for live-fetched, `false` for DB-stored)
- Result is sorted by `timestamp` descending
- If a live source fails, DB events are still returned (graceful degradation)
- Result respects `options.limit` and `options.offset` if provided

**Loop Invariants:**

- During merge: all processed entries maintain descending timestamp order

### Function 5: AWSPlugin.executeAction()

```typescript
async executeAction(action: Action): Promise<ExecutionResult>
```

**Preconditions:**

- Plugin is initialized (`isInitialized() === true`)
- `action.type` is one of: "command" (lifecycle), "task" (provision/destroy)
- `action.target` is a valid EC2 instance ID or "new" for provisioning
- AWS credentials are valid and have sufficient IAM permissions

**Postconditions:**

- Returns `ExecutionResult` with `status: "success" | "failed"`
- On success: `result.output` contains instance details
- On failure: `result.error` contains descriptive AWS error message
- A journal entry is recorded for the action regardless of outcome 
+- No partial state: provisioning either completes or is cleaned up + +**Loop Invariants:** N/A + +## Algorithmic Pseudocode + +### Algorithm: Database Adapter Factory + +```typescript +function createDatabaseAdapter(config: AppConfig): DatabaseAdapter { + const dbType = process.env.DB_TYPE ?? "sqlite"; + + if (dbType === "postgres") { + const connectionString = process.env.DATABASE_URL; + assert(connectionString !== undefined, "DATABASE_URL required for postgres"); + return new PostgresAdapter(connectionString); + } + + // Default: SQLite + const dbPath = config.databasePath; + return new SQLiteAdapter(dbPath); +} +``` + +**Preconditions:** + +- `config` is a valid AppConfig +- If `DB_TYPE=postgres`, `DATABASE_URL` environment variable is set + +**Postconditions:** + +- Returns an uninitialized adapter of the correct type +- Caller must call `adapter.initialize()` before use + +### Algorithm: Config Merge Strategy + +```typescript +function mergeConfigs( + envConfig: Record<string, unknown>, + dbConfig: Record<string, unknown> | null +): Record<string, unknown> { + // Base: environment config + const result = { ...envConfig }; + + // If no DB config, return env-only + if (dbConfig === null) return result; + + // DB values override env values (shallow merge) + for (const [key, value] of Object.entries(dbConfig)) { + if (value !== null && value !== undefined) { + result[key] = value; + } + } + + return result; +} +``` + +**Preconditions:** + +- `envConfig` is a valid config object (may be empty) +- `dbConfig` is either null or a valid config object + +**Postconditions:** + +- Result contains all keys from envConfig +- For overlapping keys, dbConfig values take precedence (if non-null) +- Neither input is mutated + +### Algorithm: Journal Timeline Aggregation + +```typescript +async function aggregateTimeline( + nodeId: string, + dbAdapter: DatabaseAdapter, + liveSources: Map<string, InformationSourcePlugin>, + options?: TimelineOptions +): Promise<JournalEntry[]> { + // Step 1: Fetch DB-stored events + const dbEntries = await dbAdapter.query( + "SELECT * FROM 
journal_entries WHERE nodeId = ? ORDER BY timestamp DESC", + [nodeId] + ); + + // Step 2: Fetch live events from each source (parallel, fault-tolerant) + const liveEntries: JournalEntry[] = []; + const livePromises = Array.from(liveSources.entries()).map( + async ([name, source]) => { + try { + if (!source.isInitialized()) return []; + const events = await source.getNodeData(nodeId, "events"); + if (!Array.isArray(events)) return []; + return events.map(e => transformToJournalEntry(e, name, true)); + } catch { + // Graceful degradation: skip failed sources + return []; + } + } + ); + + const liveResults = await Promise.all(livePromises); + for (const entries of liveResults) { + liveEntries.push(...entries); + } + + // Step 3: Merge and sort + const allEntries = [...dbEntries.map(e => ({ ...e, isLive: false })), ...liveEntries]; + allEntries.sort((a, b) => b.timestamp.localeCompare(a.timestamp)); + + // Step 4: Apply pagination + const offset = options?.offset ?? 0; + const limit = options?.limit ?? 50; + return allEntries.slice(offset, offset + limit); +} +``` + +**Preconditions:** + +- `nodeId` is non-empty +- `dbAdapter` is initialized +- `liveSources` may be empty + +**Postconditions:** + +- Returns merged, sorted, paginated timeline +- DB entries have `isLive: false`, live entries have `isLive: true` +- Failed live sources do not prevent DB entries from being returned + +**Loop Invariants:** + +- After each live source fetch: `liveEntries` contains only valid JournalEntry objects +- After merge: `allEntries` is sorted by timestamp descending + +### Algorithm: Service Migration from sqlite3 to DatabaseAdapter + +```typescript +// BEFORE (current pattern in UserService, RoleService, etc.) 
+class UserService { + private db: Database; // sqlite3.Database + + private runQuery(sql: string, params: unknown[]): Promise<void> { + return new Promise((resolve, reject) => { + this.db.run(sql, params, (err) => { + if (err) reject(err); else resolve(); + }); + }); + } +} + +// AFTER (migrated pattern) +class UserService { + private db: DatabaseAdapter; + + async createUser(data: CreateUserDTO): Promise<User> { + // Placeholder style is handled by adapter + await this.db.execute( + "INSERT INTO users (id, username, email, ...) VALUES (?, ?, ?, ...)", + [userId, data.username, data.email, ...] + ); + return await this.db.queryOne("SELECT * FROM users WHERE id = ?", [userId]); + } +} +``` + +**Migration approach:** + +- Replace `private db: Database` with `private db: DatabaseAdapter` in all services +- Replace `runQuery/getQuery/allQuery` helper methods with `db.execute/db.queryOne/db.query` +- The adapter handles placeholder translation internally +- Services affected: UserService, RoleService, GroupService, PermissionService, SetupService, ExecutionRepository, AuthenticationService, AuditLoggingService, BatchExecutionService, PuppetRunHistoryService, ReportFilterService + +## Example Usage + +### Example 1: Database Adapter initialization + +```typescript +// In server.ts startup +import { createDatabaseAdapter } from "./database/AdapterFactory"; + +const dbAdapter = createDatabaseAdapter(config); +await dbAdapter.initialize(); +await dbAdapter.runMigrations(); + +// Pass adapter to services instead of raw sqlite3.Database +const userService = new UserService(dbAdapter, authService); +const roleService = new RoleService(dbAdapter); +const journalService = new JournalService(dbAdapter); +``` + +### Example 2: AWS Plugin registration + +```typescript +// In IntegrationManager setup +const awsConfig = configService.getAWSConfig(); +if (awsConfig?.enabled) { + const awsPlugin = new AWSPlugin(logger, performanceMonitor); + integrationManager.registerPlugin(awsPlugin, { + 
enabled: true, + name: "aws", + type: "both", + config: awsConfig, + }); +} +``` + +### Example 3: Proxmox plugin registration (unchanged pattern) + +```typescript +// ProxmoxIntegration remains a single plugin — same registration as before +const proxmoxPlugin = new ProxmoxIntegration(logger, performanceMonitor); +integrationManager.registerPlugin(proxmoxPlugin, { + enabled: true, name: "proxmox", type: "both", config: proxmoxConfig, +}); +// Internally, ProxmoxService routes VM vs LXC operations based on guest type +``` + +### Example 4: Saving integration config from UI + +```typescript +// PUT /api/config/integrations/:name +router.put("/:name", authMiddleware, async (req, res) => { + const { name } = req.params; + const userId = req.user.id; + + // Check permission + const canConfigure = await permissionService.hasPermission( + userId, "integration_config", "configure" + ); + if (!canConfigure) return res.status(403).json({ error: "Forbidden" }); + + await integrationConfigService.saveConfig(userId, name, req.body); + res.json({ message: "Configuration saved" }); +}); +``` + +### Example 5: Recording a journal event from a plugin + +```typescript +// Inside AWSPlugin.executeAction after successful provisioning +const journalEntry: CreateJournalEntry = { + nodeId: `aws:${instanceId}`, + nodeUri: `aws:${region}:${instanceId}`, + eventType: "provision", + source: "aws", + action: "runInstances", + summary: `Provisioned EC2 instance ${instanceId} (${instanceType})`, + details: { instanceId, instanceType, region, ami, vpcId }, + userId: action.metadata?.userId as string, +}; +await this.journalService.recordEvent(journalEntry); +``` + +### Example 6: Checking granular RBAC permission + +```typescript +// Middleware checking if user can provision on Proxmox +const canProvision = await permissionService.hasPermission( + userId, "proxmox", "provision" +); + +// Proxmox plugin handles VM vs LXC internally — permission is at the integration level +if (!canProvision) { + 
throw new Error("Insufficient permissions to provision on Proxmox"); +} + +// AWS uses the same pattern +const canProvisionAWS = await permissionService.hasPermission( + userId, "aws", "provision" +); +``` + +## Correctness Properties + +*A property is a characteristic or behavior that should hold true across all valid executions of a system — essentially, a formal statement about what the system should do. Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees.* + +### Property 1: Database Adapter Query Consistency + +*For any* valid SQL query `q` and parameters `p`, executing `SQLiteAdapter.query(q, p)` and `PostgresAdapter.query(q, p)` on identical data sets should return structurally identical result arrays. + +**Validates: Requirements 1.6, 1.7** + +### Property 2: Placeholder Dialect Correctness + +*For any* positive integer N, `SQLiteAdapter.getPlaceholder(N)` should return `"?"` and `PostgresAdapter.getPlaceholder(N)` should return `"$N"`. + +**Validates: Requirements 1.5, 2.4, 3.3** + +### Property 3: Transaction Atomicity + +*For any* set of database mutations wrapped in `withTransaction(fn)`, if `fn` resolves then all mutations are committed and visible, and if `fn` rejects then no mutations are visible. No partial state is observable. + +**Validates: Requirements 7.1, 7.2** + +### Property 4: Migration Idempotency + +*For any* database and set of migration files, running `runMigrations()` multiple times produces the same schema state. Already-applied migrations are skipped on subsequent runs. + +**Validates: Requirements 5.4, 5.5** + +### Property 5: Migration Dialect Selection + +*For any* set of migration files containing dialect-specific variants (`.sqlite.sql`, `.postgres.sql`) and shared files (`.sql`), the MigrationRunner selects only files matching the active adapter's dialect. 
+ +**Validates: Requirement 5.3** + +### Property 6: Config Merge Determinism + +*For any* `.env` config object and database config object, `getEffectiveConfig(name)` returns a merged result where database values override `.env` values for overlapping non-null keys, and all non-overlapping keys from both sources are preserved. + +**Validates: Requirements 19.1, 19.2** + +### Property 7: Encryption Round-Trip + +*For any* string value `v`, `decrypt(encrypt(v)) === v`. Sensitive fields stored in `integration_configs.config` survive the encrypt-store-retrieve-decrypt cycle without data loss. + +**Validates: Requirements 18.4, 18.5, 20.3** + +### Property 8: Integration Config CRUD Round-Trip + +*For any* valid integration config object, saving it via `IntegrationConfigService.saveConfig` and then retrieving it via `getConfig` for the same user and integration name should return an equivalent config (with sensitive fields decrypted). + +**Validates: Requirement 18.1** + +### Property 9: Sensitive Field Encryption at Rest + +*For any* config containing fields matching sensitive patterns (`*token*`, `*password*`, `*secret*`, `*key*`), the raw database value for those fields should not equal the plaintext value after `saveConfig` is called. + +**Validates: Requirement 18.4** + +### Property 10: Journal Entry Completeness + +*For any* completed action (provisioning, lifecycle, or execution) regardless of success or failure, exactly one journal entry is recorded containing nodeId, nodeUri, eventType, source, action, summary, and timestamp. + +**Validates: Requirements 10.4, 11.4, 22.1, 22.2, 22.3, 22.4** + +### Property 11: Journal Timeline Sort Order + +*For any* set of journal entries (both database-stored and live-fetched), `aggregateTimeline(nodeId)` returns entries sorted by timestamp in descending order. 
+ +**Validates: Requirement 23.3** + +### Property 12: Journal isLive Flag Correctness + +*For any* aggregated timeline, entries originating from the database have `isLive === false` and entries fetched from live sources have `isLive === true`. + +**Validates: Requirement 23.2** + +### Property 13: Journal Pagination Bounds + +*For any* timeline aggregation with limit L and offset O, the result contains at most L entries. + +**Validates: Requirement 23.5** + +### Property 14: Journal Source Validation + +*For any* string value not in the set {proxmox, aws, bolt, ansible, ssh, puppetdb, user, system}, attempting to record a journal entry with that source should fail validation. For any value in the set, recording should succeed. + +**Validates: Requirements 25.3, 26.3** + +### Property 15: Proxmox Compute Type Partition + +*For any* Proxmox inventory, every item has a `computeType` field equal to `"vm"` or `"lxc"`. Filtering by `computeType === "vm"` and `computeType === "lxc"` produces disjoint sets whose union equals the full unfiltered inventory. + +**Validates: Requirements 14.3, 14.4, 16.1, 16.2, 16.3** + +### Property 16: AWS Node Field Completeness + +*For any* EC2 instance returned by `AWSPlugin.getInventory()`, the corresponding Node object includes instance state, type, region, VPC, and tags fields. + +**Validates: Requirement 9.4** + +### Property 17: AWS Instance Grouping Correctness + +*For any* set of EC2 instances, `AWSPlugin.getGroups()` produces groups where every instance appears in at least one group (by region, VPC, or tags), and no group contains instances that don't match its grouping criterion. + +**Validates: Requirement 9.2** + +### Property 18: Plugin Registration Uniqueness + +*For any* sequence of plugin registrations, `IntegrationManager` never contains two plugins with the same name. Registering a plugin with a name that already exists throws an error. 
+ +**Validates: Requirements 31.1, 31.2** + +### Property 19: Permission Monotonicity + +*For any* role, adding a permission to that role never removes existing permissions, and removing a permission never adds new ones. The set of permissions only changes by the explicitly added or removed permission. + +**Validates: Requirement 29.2** + +## Error Handling + +### Error Scenario 1: Database Connection Failure + +**Condition**: PostgreSQL is unreachable at startup or connection drops during operation +**Response**: `PostgresAdapter.initialize()` throws `DatabaseConnectionError`. During operation, queries throw `DatabaseConnectionError` with retry info. +**Recovery**: Adapter implements connection pool with automatic reconnection. Services receive errors and can retry. Health check reports database as unhealthy. + +### Error Scenario 2: AWS Credentials Invalid or Expired + +**Condition**: AWS credentials in config are invalid, expired, or lack required IAM permissions +**Response**: `AWSPlugin.performHealthCheck()` returns `{ healthy: false, message: "AWS authentication failed" }`. Actions throw `AWSAuthenticationError`. +**Recovery**: Plugin reports degraded state. User can update credentials via IntegrationConfigService. Health check scheduler will detect recovery. + +### Error Scenario 3: Proxmox — Backward Compatibility + +**Condition**: Existing Proxmox configs and permissions reference the current `proxmox` resource name +**Response**: No migration needed — the plugin name remains `proxmox`. New granular actions (`provision`, `lifecycle`, `destroy`) are added alongside existing ones (`read`, `execute`, `admin`). +**Recovery**: Existing `proxmox:execute` permission continues to work. New actions are additive. Migration seeds the new action types without removing old ones. 
+ +### Error Scenario 4: Encryption Key Rotation + +**Condition**: The encryption key for integration configs needs to be rotated +**Response**: `IntegrationConfigService.rotateEncryptionKey(oldKey, newKey)` re-encrypts all stored configs. +**Recovery**: Atomic operation within a transaction. If rotation fails, all configs remain encrypted with the old key. + +### Error Scenario 5: Journal Live Source Timeout + +**Condition**: PuppetDB or other live source times out when fetching events for journal aggregation +**Response**: `aggregateTimeline()` returns DB-stored events with a warning flag indicating incomplete live data. +**Recovery**: Subsequent requests retry live sources. Cached live data (if available) is used as fallback. + +## Testing Strategy + +### Unit Testing Approach + +- Test each `DatabaseAdapter` implementation independently with in-memory databases +- Test `IntegrationConfigService` encryption/decryption round-trips +- Test `JournalService` merge logic with mock DB and mock live sources +- Test `AWSPlugin` with mocked AWS SDK calls +- Test `ProxmoxIntegration` VM/LXC routing with mocked `ProxmoxService` +- Test new RBAC permissions seeding and permission checks +- Test config merge logic with various `.env` + DB combinations + +### Property-Based Testing Approach + +**Property Test Library**: fast-check + +- **DB Adapter**: For any sequence of insert/query operations, both adapters produce identical results +- **Config Merge**: `merge(env, db)` is idempotent: `merge(merge(env, db), db) === merge(env, db)` +- **Journal Sort**: `aggregateTimeline()` output is always sorted by timestamp descending +- **Permission Check**: `hasPermission` is consistent with `getUserPermissions` — if a permission appears in the user's permission list, `hasPermission` returns true for it +- **Encryption**: For any string input, `decrypt(encrypt(input)) === input` + +### Integration Testing Approach + +- Test full request flow: API → Service → DatabaseAdapter → actual 
SQLite/Postgres +- Test plugin registration and initialization with IntegrationManager +- Test journal event recording triggered by actual plugin actions +- Test RBAC permission checks through auth middleware with real DB +- Test config save/load cycle through API endpoints +- Test Proxmox: verify inventory includes both VMs and containers with correct `computeType` + +## Performance Considerations + +- **Connection Pooling**: PostgresAdapter uses `pg.Pool` with configurable `max` connections (default: 10). SQLiteAdapter uses single connection with WAL mode. +- **Query Caching**: PermissionService already caches permission checks for 5 minutes. This is retained and extended to new permission types. +- **Journal Pagination**: Journal queries use cursor-based pagination on the `(nodeId, timestamp)` composite index to avoid offset-based performance degradation. +- **Live Source Timeouts**: Journal aggregation applies a 5-second timeout per live source to prevent slow sources from blocking the timeline. +- **Inventory Cache**: IntegrationManager's existing inventory cache (5-minute TTL) applies to new plugins (AWS) and enhanced Proxmox. +- **Prepared Statements**: DatabaseAdapter supports prepared statements for frequently-executed queries (permission checks, journal inserts). + +## Security Considerations + +- **Config Encryption**: Integration configs containing sensitive fields are encrypted with AES-256-GCM using a key derived from `JWT_SECRET` + a per-record salt. The encryption key is never stored in the database. +- **AWS Credentials**: AWS access keys and secret keys are stored encrypted in `integration_configs`. IAM role-based authentication is recommended over static credentials. +- **SQL Injection Prevention**: All queries use parameterized statements through the `DatabaseAdapter` interface. No string concatenation for SQL construction. +- **Permission Escalation Prevention**: Users cannot assign permissions they don't have. 
Role modification requires `roles:admin` permission. Built-in roles cannot be deleted. +- **Journal Audit Trail**: Journal entries are append-only. Deletion requires `journal:admin` permission and is logged in the audit log. +- **Proxmox Token Security**: Proxmox API tokens stored in DB configs are encrypted. SSL certificate validation warnings are logged. + +## Dependencies + +### New Dependencies + +| Package | Purpose | Feature | +|---------|---------|---------| +| `pg` | PostgreSQL client for Node.js | Feature 1: DB Abstraction | +| `@aws-sdk/client-ec2` | AWS EC2 API client | Feature 2: AWS Plugin | +| `@aws-sdk/client-sts` | AWS STS for credential validation | Feature 2: AWS Plugin | + +### Existing Dependencies (Unchanged) + +| Package | Purpose | +|---------|---------| +| `sqlite3` | SQLite database driver | +| `express` | HTTP server framework | +| `zod` | Schema validation | +| `jsonwebtoken` | JWT authentication | +| `bcrypt` | Password hashing | +| `dotenv` | Environment variable loading | + +### Dependency Notes + +- `pg` is the most widely-used PostgreSQL client for Node.js with TypeScript support +- AWS SDK v3 (`@aws-sdk/client-ec2`) uses modular imports to minimize bundle size +- No ORM is introduced — the `DatabaseAdapter` is a thin abstraction, not a query builder +- All new dependencies should be pinned to specific versions in `package.json` diff --git a/.kiro/specs/pabawi-release-1-0-0/requirements.md b/.kiro/specs/pabawi-release-1-0-0/requirements.md new file mode 100644 index 00000000..89f6986e --- /dev/null +++ b/.kiro/specs/pabawi-release-1-0-0/requirements.md @@ -0,0 +1,388 @@ +# Requirements Document + +## Introduction + +This document specifies requirements for Pabawi release 1.0.0, a major release introducing six foundational feature areas: full database abstraction (SQLite + PostgreSQL), an AWS EC2 plugin, improved Proxmox VM/container provisioning, local database storage for integration configurations, a nodes journal for event 
tracking, and improved RBAC with fine-grained permissions. These requirements are derived from the approved technical design document. + +## Glossary + +- **Pabawi**: The web application for infrastructure management, inventory, and remote execution +- **DatabaseAdapter**: Interface abstracting database operations, with SQLiteAdapter and PostgresAdapter implementations +- **SQLiteAdapter**: DatabaseAdapter implementation for SQLite databases +- **PostgresAdapter**: DatabaseAdapter implementation for PostgreSQL databases +- **AWSPlugin**: Plugin integrating AWS EC2 into Pabawi, extending BasePlugin +- **ProxmoxIntegration**: Single plugin managing both Proxmox VMs and containers +- **ProxmoxService**: Backend service handling Proxmox API communication with explicit compute type routing +- **IntegrationConfigService**: Service for storing and retrieving integration configurations in the database per user +- **JournalService**: Service for recording and retrieving a unified timeline of node events +- **JournalEntry**: A single event record in the nodes journal +- **JournalSource**: Origin of a journal event (proxmox, aws, bolt, ansible, ssh, puppetdb, user, system) +- **PermissionService**: Service for checking user permissions against resource-action pairs +- **IntegrationManager**: Registry managing all integration plugins +- **BasePlugin**: Abstract base class for all integration plugins +- **ConfigService**: Service loading and validating application configuration from environment and database +- **Provisioner_Role**: New built-in role for infrastructure provisioning and lifecycle management +- **Effective_Config**: The merged result of .env file values and database-stored config values +- **Compute_Type**: Classification of a Proxmox guest as "vm" (QEMU) or "lxc" (container) +- **Node**: A managed infrastructure resource (VM, container, or instance) +- **Node_URI**: Unique identifier for a node within a specific integration (e.g., "aws:us-east-1:i-abc123") +- 
**Migration**: A versioned SQL script that modifies the database schema +- **MigrationRunner**: Component that selects and executes dialect-appropriate migration files + +## Requirements + +### Requirement 1: Database Adapter Interface + +**User Story:** As a developer, I want a unified database interface that abstracts SQLite and PostgreSQL, so that all services can operate against either database without code changes. + +#### Acceptance Criteria + +1. THE DatabaseAdapter SHALL expose query, queryOne, and execute methods that accept SQL strings and parameter arrays +2. THE DatabaseAdapter SHALL expose beginTransaction, commit, rollback, and withTransaction methods for transaction support +3. THE DatabaseAdapter SHALL expose initialize and close methods for connection lifecycle management +4. THE DatabaseAdapter SHALL expose a getDialect method returning "sqlite" or "postgres" +5. THE DatabaseAdapter SHALL expose a getPlaceholder method that returns "?" for SQLite and "$N" for PostgreSQL +6. WHEN a service calls DatabaseAdapter.query with valid SQL and parameters, THE DatabaseAdapter SHALL return an array of typed rows +7. WHEN a service calls DatabaseAdapter.queryOne, THE DatabaseAdapter SHALL return a single row or null + +### Requirement 2: SQLite Adapter Implementation + +**User Story:** As a developer, I want an SQLiteAdapter that implements DatabaseAdapter using the sqlite3 package, so that existing SQLite databases continue to work. + +#### Acceptance Criteria + +1. THE SQLiteAdapter SHALL implement all DatabaseAdapter interface methods using the sqlite3 package +2. WHEN SQLiteAdapter.initialize is called, THE SQLiteAdapter SHALL open the database file at the configured path +3. WHEN SQLiteAdapter.initialize is called, THE SQLiteAdapter SHALL enable WAL mode for concurrent read performance +4. WHEN SQLiteAdapter.getPlaceholder is called, THE SQLiteAdapter SHALL return "?" regardless of the index parameter +5. 
WHEN SQLiteAdapter.close is called, THE SQLiteAdapter SHALL close the database connection and release resources + +### Requirement 3: PostgreSQL Adapter Implementation + +**User Story:** As a developer, I want a PostgresAdapter that implements DatabaseAdapter using the pg package, so that Pabawi can scale to PostgreSQL for production deployments. + +#### Acceptance Criteria + +1. THE PostgresAdapter SHALL implement all DatabaseAdapter interface methods using the pg package +2. WHEN PostgresAdapter.initialize is called, THE PostgresAdapter SHALL create a connection pool using the DATABASE_URL environment variable +3. WHEN PostgresAdapter.getPlaceholder is called with index N, THE PostgresAdapter SHALL return "$N" +4. WHEN PostgresAdapter.close is called, THE PostgresAdapter SHALL drain the connection pool and release all connections +5. IF the PostgreSQL server is unreachable during initialize, THEN THE PostgresAdapter SHALL throw a DatabaseConnectionError with a descriptive message + +### Requirement 4: Database Adapter Factory + +**User Story:** As a developer, I want a factory function that creates the correct adapter based on configuration, so that the database backend is selected at startup without service changes. + +#### Acceptance Criteria + +1. WHEN DB_TYPE environment variable is "postgres", THE AdapterFactory SHALL create a PostgresAdapter +2. WHEN DB_TYPE environment variable is "sqlite" or unset, THE AdapterFactory SHALL create an SQLiteAdapter +3. IF DB_TYPE is "postgres" and DATABASE_URL is not set, THEN THE AdapterFactory SHALL throw a configuration error + +### Requirement 5: Database Migration Support + +**User Story:** As a developer, I want dialect-aware migrations, so that schema changes work correctly on both SQLite and PostgreSQL. + +#### Acceptance Criteria + +1. THE MigrationRunner SHALL support dialect-specific migration files using the naming convention NNN_name.sqlite.sql and NNN_name.postgres.sql +2. 
THE MigrationRunner SHALL support shared migration files using the naming convention NNN_name.sql for SQL compatible with both dialects +3. WHEN runMigrations is called, THE MigrationRunner SHALL select migration files matching the active dialect +4. WHEN runMigrations is called, THE MigrationRunner SHALL skip migrations that have already been applied +5. WHEN runMigrations is called multiple times on the same database, THE MigrationRunner SHALL produce the same schema state + +### Requirement 6: Service Migration to DatabaseAdapter + +**User Story:** As a developer, I want all existing services migrated from direct sqlite3.Database usage to DatabaseAdapter, so that the entire backend is database-agnostic. + +#### Acceptance Criteria + +1. THE UserService SHALL accept a DatabaseAdapter instead of sqlite3.Database in its constructor +2. THE RoleService SHALL accept a DatabaseAdapter instead of sqlite3.Database in its constructor +3. THE PermissionService SHALL accept a DatabaseAdapter instead of sqlite3.Database in its constructor +4. FOR ALL services currently using sqlite3.Database (UserService, RoleService, GroupService, PermissionService, SetupService, ExecutionRepository, AuthenticationService, AuditLoggingService, BatchExecutionService, PuppetRunHistoryService, ReportFilterService), THE Pabawi SHALL replace direct sqlite3 usage with DatabaseAdapter calls +5. WHEN services are migrated to DatabaseAdapter, THE Pabawi SHALL remove the private runQuery, getQuery, and allQuery helper methods from each service + +### Requirement 7: Transaction Support + +**User Story:** As a developer, I want reliable transaction support across both database backends, so that multi-step operations are atomic. + +#### Acceptance Criteria + +1. WHEN withTransaction is called and the callback resolves, THE DatabaseAdapter SHALL commit all mutations +2. WHEN withTransaction is called and the callback rejects, THE DatabaseAdapter SHALL rollback all mutations +3. 
IF a transaction is already active in SQLiteAdapter, THEN THE SQLiteAdapter SHALL throw an error indicating nested transactions are not supported + +### Requirement 8: AWS Plugin Registration + +**User Story:** As a system administrator, I want an AWS plugin that integrates with Pabawi's plugin architecture, so that AWS EC2 resources appear alongside other infrastructure. + +#### Acceptance Criteria + +1. THE AWSPlugin SHALL extend BasePlugin and implement both ExecutionToolPlugin and InformationSourcePlugin interfaces +2. THE AWSPlugin SHALL register with IntegrationManager using the name "aws" +3. WHEN AWSPlugin is registered, THE IntegrationManager SHALL treat the AWSPlugin as a single plugin handling all AWS services +4. WHEN AWS configuration is disabled, THE IntegrationManager SHALL skip AWSPlugin registration + +### Requirement 9: AWS EC2 Inventory + +**User Story:** As a system administrator, I want to see EC2 instances in Pabawi's inventory, so that I can manage AWS resources alongside on-premise infrastructure. + +#### Acceptance Criteria + +1. WHEN AWSPlugin.getInventory is called, THE AWSPlugin SHALL return all EC2 instances as Node objects +2. WHEN AWSPlugin.getGroups is called, THE AWSPlugin SHALL group instances by region, VPC, and tags +3. WHEN AWSPlugin.getNodeFacts is called with a valid instance ID, THE AWSPlugin SHALL return instance metadata as a Facts object +4. THE AWSPlugin SHALL include instance state, type, region, VPC, and tags in each Node object + +### Requirement 10: AWS EC2 Provisioning + +**User Story:** As a system administrator, I want to provision new EC2 instances from Pabawi, so that I can manage AWS infrastructure without switching tools. + +#### Acceptance Criteria + +1. WHEN a provision action is executed on AWSPlugin, THE AWSPlugin SHALL call the EC2 runInstances API with the specified parameters +2. WHEN provisioning succeeds, THE AWSPlugin SHALL return an ExecutionResult with status "success" and the new instance ID +3. 
IF provisioning fails, THEN THE AWSPlugin SHALL return an ExecutionResult with status "failed" and a descriptive AWS error message +4. WHEN provisioning completes (success or failure), THE AWSPlugin SHALL record a journal entry via JournalService + +### Requirement 11: AWS EC2 Lifecycle Actions + +**User Story:** As a system administrator, I want to start, stop, reboot, and terminate EC2 instances from Pabawi, so that I can manage instance lifecycle centrally. + +#### Acceptance Criteria + +1. WHEN a lifecycle action (start, stop, reboot, terminate) is executed on AWSPlugin, THE AWSPlugin SHALL call the corresponding EC2 API +2. WHEN a lifecycle action succeeds, THE AWSPlugin SHALL return an ExecutionResult with status "success" +3. IF AWS credentials are invalid or expired, THEN THE AWSPlugin SHALL throw an AWSAuthenticationError with a descriptive message +4. WHEN a lifecycle action completes, THE AWSPlugin SHALL record a journal entry via JournalService + +### Requirement 12: AWS Plugin Health Check + +**User Story:** As a system administrator, I want AWS health status visible in Pabawi, so that I can monitor connectivity to AWS services. + +#### Acceptance Criteria + +1. WHEN AWSPlugin.performHealthCheck is called with valid credentials, THE AWSPlugin SHALL return healthy status with AWS account details +2. IF AWS credentials are invalid, THEN THE AWSPlugin SHALL return unhealthy status with message "AWS authentication failed" +3. WHEN AWSPlugin reports unhealthy status, THE AWSPlugin SHALL continue to accept configuration updates without crashing + +### Requirement 13: AWS Resource Discovery + +**User Story:** As a system administrator, I want to browse available AWS regions, instance types, AMIs, VPCs, subnets, security groups, and key pairs, so that I can configure provisioning parameters. + +#### Acceptance Criteria + +1. WHEN AWSPlugin.getRegions is called, THE AWSPlugin SHALL return available AWS regions +2. 
WHEN AWSPlugin.getInstanceTypes is called, THE AWSPlugin SHALL return available EC2 instance types +3. WHEN AWSPlugin.getAMIs is called with a region, THE AWSPlugin SHALL return available AMIs for that region +4. WHEN AWSPlugin.getVPCs is called with a region, THE AWSPlugin SHALL return available VPCs +5. WHEN AWSPlugin.getSubnets is called with a region, THE AWSPlugin SHALL return available subnets +6. WHEN AWSPlugin.getSecurityGroups is called with a region, THE AWSPlugin SHALL return available security groups +7. WHEN AWSPlugin.getKeyPairs is called with a region, THE AWSPlugin SHALL return available key pairs + +### Requirement 14: Proxmox Single Plugin Architecture + +**User Story:** As a developer, I want Proxmox to remain a single plugin handling both VMs and containers internally, so that the integration model stays consistent and avoids plugin proliferation. + +#### Acceptance Criteria + +1. THE ProxmoxIntegration SHALL register with IntegrationManager using the single name "proxmox" +2. THE ProxmoxIntegration SHALL handle both VM (QEMU) and container (LXC) operations within the same plugin instance +3. WHEN ProxmoxIntegration.getInventory is called, THE ProxmoxIntegration SHALL return both VMs and containers in a single result set +4. FOR ALL inventory items returned by ProxmoxIntegration, THE ProxmoxIntegration SHALL include a computeType field with value "vm" or "lxc" + +### Requirement 15: Proxmox Compute Type Routing + +**User Story:** As a system administrator, I want Proxmox provisioning to route to the correct API endpoint based on compute type, so that VMs and containers are created correctly. + +#### Acceptance Criteria + +1. WHEN a provisioning action with computeType "vm" is received, THE ProxmoxService SHALL call the Proxmox QEMU API endpoint +2. WHEN a provisioning action with computeType "lxc" is received, THE ProxmoxService SHALL call the Proxmox LXC API endpoint +3. 
THE ProxmoxService SHALL expose separate createVM and createLXC methods with type-specific parameter validation +4. WHEN a lifecycle action is executed, THE ProxmoxService SHALL determine the guest type internally and route to the correct API + +### Requirement 16: Proxmox Inventory Filtering + +**User Story:** As a system administrator, I want to filter Proxmox inventory by compute type, so that I can view VMs and containers separately. + +#### Acceptance Criteria + +1. WHEN ProxmoxService.getInventory is called with computeType "qemu", THE ProxmoxService SHALL return only VMs +2. WHEN ProxmoxService.getInventory is called with computeType "lxc", THE ProxmoxService SHALL return only containers +3. WHEN ProxmoxService.getInventory is called without a computeType filter, THE ProxmoxService SHALL return all guests +4. THE ProxmoxIntegration SHALL include type-based groups ("Proxmox VMs", "Proxmox Containers") in getGroups results + +### Requirement 17: Proxmox UI Separation + +**User Story:** As a system administrator, I want separate provisioning forms for VMs and containers in the UI, so that I see only the relevant configuration options for each compute type. + +#### Acceptance Criteria + +1. WHEN the user selects VM provisioning, THE Pabawi frontend SHALL display a VM-specific provisioning form with QEMU parameters +2. WHEN the user selects container provisioning, THE Pabawi frontend SHALL display a container-specific provisioning form with LXC parameters +3. THE Pabawi frontend SHALL submit both VM and container provisioning requests to the same "proxmox" backend plugin + +### Requirement 18: Integration Config Storage + +**User Story:** As a system administrator, I want to store integration configurations in the database per user, so that I can manage settings through the UI without editing .env files. + +#### Acceptance Criteria + +1. THE IntegrationConfigService SHALL support saving, retrieving, updating, and deleting integration configs per user +2. 
THE IntegrationConfigService SHALL enforce a unique constraint of one active config per integration per user +3. WHEN IntegrationConfigService.saveConfig is called, THE IntegrationConfigService SHALL validate the config against the integration-specific Zod schema +4. WHEN IntegrationConfigService.saveConfig is called, THE IntegrationConfigService SHALL encrypt sensitive fields (matching patterns: *token*, *password*, *secret*, *key*) using AES-256-GCM before storage +5. WHEN IntegrationConfigService.getConfig is called, THE IntegrationConfigService SHALL decrypt sensitive fields before returning the config + +### Requirement 19: Config Merge Strategy + +**User Story:** As a system administrator, I want database configs to override .env values, so that UI-configured settings take precedence over file-based defaults. + +#### Acceptance Criteria + +1. WHEN IntegrationConfigService.getEffectiveConfig is called, THE IntegrationConfigService SHALL merge .env values as base with database values overriding +2. WHEN a database config value is non-null for a key, THE IntegrationConfigService SHALL use the database value over the .env value +3. WHEN no database config exists for an integration, THE IntegrationConfigService SHALL return the .env config only +4. WHEN no .env config exists for an integration, THE IntegrationConfigService SHALL return the database config only + +### Requirement 20: Config Encryption + +**User Story:** As a system administrator, I want sensitive configuration values encrypted at rest, so that credentials are protected in the database. + +#### Acceptance Criteria + +1. THE IntegrationConfigService SHALL encrypt sensitive fields using AES-256-GCM with a key derived from JWT_SECRET plus a per-record salt +2. THE IntegrationConfigService SHALL store encrypted values in the integration_configs table config column +3. FOR ALL config values, decrypting an encrypted value SHALL produce the original plaintext value +4. 
WHEN IntegrationConfigService.rotateEncryptionKey is called, THE IntegrationConfigService SHALL re-encrypt all stored configs atomically within a transaction + +### Requirement 21: Integration Config UI + +**User Story:** As a system administrator, I want UI pages for managing integration configurations, so that I can configure integrations without server access. + +#### Acceptance Criteria + +1. THE Pabawi frontend SHALL provide configuration pages for each registered integration +2. WHEN a user saves an integration config through the UI, THE Pabawi SHALL call IntegrationConfigService.saveConfig with the user's ID +3. WHEN displaying stored configs, THE Pabawi frontend SHALL mask sensitive field values by default + +### Requirement 22: Journal Event Recording + +**User Story:** As a system administrator, I want all provisioning events, lifecycle actions, and execution results recorded in a journal, so that I have a complete audit trail for each node. + +#### Acceptance Criteria + +1. WHEN a provisioning action completes (success or failure), THE JournalService SHALL record exactly one journal entry +2. WHEN a lifecycle action completes, THE JournalService SHALL record exactly one journal entry +3. WHEN an execution result is produced by any plugin, THE JournalService SHALL record exactly one journal entry +4. FOR ALL journal entries, THE JournalService SHALL include nodeId, nodeUri, eventType, source, action, summary, timestamp, and optionally userId and details + +### Requirement 23: Journal Timeline Aggregation + +**User Story:** As a system administrator, I want a unified timeline combining stored events and live PuppetDB data, so that I see the complete history of a node in one view. + +#### Acceptance Criteria + +1. WHEN JournalService.aggregateTimeline is called, THE JournalService SHALL fetch both database-stored events and live-source events +2. 
WHEN aggregating events, THE JournalService SHALL mark database events with isLive false and live-fetched events with isLive true +3. THE JournalService SHALL sort the aggregated timeline by timestamp in descending order +4. IF a live source fails during aggregation, THEN THE JournalService SHALL return database events and skip the failed source +5. WHEN options include limit and offset, THE JournalService SHALL apply pagination to the merged result + +### Requirement 24: Journal Manual Notes + +**User Story:** As a system administrator, I want to add manual notes to nodes in the journal, so that I can document observations and decisions alongside automated events. + +#### Acceptance Criteria + +1. WHEN JournalService.addNote is called with a nodeId, userId, and content, THE JournalService SHALL create a journal entry with eventType "note" and source "user" +2. THE JournalService SHALL include the userId of the note author in the journal entry +3. WHEN searching journal entries, THE JournalService SHALL include manual notes in search results + +### Requirement 25: Journal Source Naming + +**User Story:** As a developer, I want journal sources to use integration-level names, so that the journal model is consistent with the single-plugin architecture. + +#### Acceptance Criteria + +1. THE JournalService SHALL use "proxmox" as the source for all Proxmox events (both VM and container) +2. THE JournalService SHALL use "aws" as the source for all AWS events +3. THE JournalService SHALL validate that the source field matches one of the defined JournalSource values (proxmox, aws, bolt, ansible, ssh, puppetdb, user, system) + +### Requirement 26: Journal Data Model + +**User Story:** As a developer, I want a well-defined journal entries table, so that events are stored consistently and queryable. + +#### Acceptance Criteria + +1. 
THE journal_entries table SHALL include columns: id, nodeId, nodeUri, eventType, source, action, summary, details (JSON), userId, and timestamp +2. THE journal_entries table SHALL have indexes on nodeId, timestamp (descending), eventType, source, and a composite index on (nodeId, timestamp descending) +3. THE journal_entries table SHALL enforce that eventType is one of the defined JournalEventType values +4. THE journal_entries table SHALL enforce that timestamp is in ISO 8601 format + +### Requirement 27: New Permission Resources and Actions + +**User Story:** As a system administrator, I want fine-grained permissions for new features, so that I can control access to provisioning, lifecycle, journal, and config operations independently. + +#### Acceptance Criteria + +1. THE Pabawi SHALL add permission resource "proxmox" with actions: read, lifecycle, provision, destroy, admin +2. THE Pabawi SHALL add permission resource "aws" with actions: read, lifecycle, provision, destroy, admin +3. THE Pabawi SHALL add permission resource "journal" with actions: read, note, admin +4. THE Pabawi SHALL add permission resource "integration_config" with actions: read, configure, admin +5. THE Pabawi SHALL seed new permissions via a database migration + +### Requirement 28: Provisioner Built-in Role + +**User Story:** As a system administrator, I want a Provisioner built-in role, so that I can grant infrastructure provisioning access without full admin privileges. + +#### Acceptance Criteria + +1. THE Pabawi SHALL create a built-in "Provisioner" role via database migration +2. THE Provisioner_Role SHALL include read, provision, destroy, and lifecycle permissions for proxmox and aws resources +3. THE Provisioner_Role SHALL include read and note permissions for the journal resource +4. THE Provisioner_Role SHALL include read permission for the integration_config resource +5. 
THE Provisioner_Role SHALL be marked as isBuiltIn and protected from deletion + +### Requirement 29: RBAC Backward Compatibility + +**User Story:** As a system administrator, I want existing permission checks to continue working after the upgrade, so that current users are not disrupted. + +#### Acceptance Criteria + +1. WHEN existing code checks hasPermission with resource "proxmox" and action "execute", THE PermissionService SHALL continue to return the correct result +2. THE Pabawi SHALL add new granular permissions (provision, destroy, lifecycle, configure, note, export) without removing existing permissions (read, write, execute, admin) +3. FOR ALL existing built-in roles (Viewer, Operator, Administrator), THE Pabawi SHALL preserve their current permission assignments +4. THE Administrator role SHALL receive all new permissions in addition to existing ones + +### Requirement 30: Permission Check Performance + +**User Story:** As a developer, I want permission checks to be fast, so that authorization does not add noticeable latency to API requests. + +#### Acceptance Criteria + +1. THE PermissionService SHALL cache permission check results with a configurable TTL +2. WHEN a permission is added to or removed from a role, THE PermissionService SHALL invalidate the relevant cache entries +3. THE PermissionService SHALL support the new permission types (provision, destroy, lifecycle, configure, note, export) in cache lookups + +### Requirement 31: Plugin Registration Uniqueness + +**User Story:** As a developer, I want the IntegrationManager to prevent duplicate plugin names, so that plugin identity is unambiguous. + +#### Acceptance Criteria + +1. WHEN a plugin is registered with a name that already exists, THE IntegrationManager SHALL throw an error +2. 
THE IntegrationManager SHALL enforce that each registered plugin has a unique name + +### Requirement 32: Integration Config Database Table + +**User Story:** As a developer, I want a well-defined integration_configs table, so that configs are stored consistently with proper constraints. + +#### Acceptance Criteria + +1. THE integration_configs table SHALL include columns: id, userId, integrationName, config (JSON), isActive, createdAt, updatedAt +2. THE integration_configs table SHALL enforce a UNIQUE constraint on (userId, integrationName) +3. THE integration_configs table SHALL have a foreign key from userId to users(id) with ON DELETE CASCADE +4. THE integration_configs table SHALL have indexes on userId, integrationName, and isActive diff --git a/.kiro/specs/pabawi-release-1-0-0/tasks.md b/.kiro/specs/pabawi-release-1-0-0/tasks.md new file mode 100644 index 00000000..98337955 --- /dev/null +++ b/.kiro/specs/pabawi-release-1-0-0/tasks.md @@ -0,0 +1,389 @@ +# Implementation Plan: Pabawi Release 1.0.0 + +## Overview + +This plan implements six foundational features for Pabawi 1.0.0 in dependency order: database abstraction first (foundational), then service migration, followed by new features (journal, config storage, AWS plugin, Proxmox enhancements, RBAC) that build on the adapter. Each task builds incrementally on previous work. TypeScript is used throughout. + +## Tasks + +- [x] 1. 
Database Adapter Interface and Implementations + - [x] 1.1 Create the DatabaseAdapter interface, factory, and error types + - Create `backend/src/database/DatabaseAdapter.ts` with the `DatabaseAdapter` interface: `query`, `queryOne`, `execute`, `beginTransaction`, `commit`, `rollback`, `withTransaction`, `initialize`, `close`, `isConnected`, `getDialect`, `getPlaceholder` + - Create `backend/src/database/errors.ts` with `DatabaseQueryError`, `DatabaseConnectionError` error classes + - Create `backend/src/database/AdapterFactory.ts` with `createDatabaseAdapter(config)` that returns SQLiteAdapter when DB_TYPE is "sqlite" or unset, PostgresAdapter when DB_TYPE is "postgres", and throws if DB_TYPE is "postgres" without DATABASE_URL + - _Requirements: 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 4.1, 4.2, 4.3_ + + - [x] 1.2 Implement SQLiteAdapter + - Create `backend/src/database/SQLiteAdapter.ts` implementing all DatabaseAdapter methods using the `sqlite3` package + - Enable WAL mode on initialize, return `"?"` from getPlaceholder, implement withTransaction with rollback on error, throw error on nested transactions + - _Requirements: 2.1, 2.2, 2.3, 2.4, 2.5, 7.1, 7.2, 7.3_ + + - [x] 1.3 Implement PostgresAdapter + - Create `backend/src/database/PostgresAdapter.ts` implementing all DatabaseAdapter methods using the `pg` package + - Create connection pool from DATABASE_URL on initialize, return `"$N"` from getPlaceholder, implement withTransaction using pool client, throw DatabaseConnectionError if server unreachable + - Add `pg` and `@types/pg` as dependencies in `backend/package.json` + - _Requirements: 3.1, 3.2, 3.3, 3.4, 3.5, 7.1, 7.2_ + + - [ ]* 1.4 Write property test: Placeholder Dialect Correctness + - **Property 2: Placeholder Dialect Correctness** + - For any positive integer N, SQLiteAdapter.getPlaceholder(N) returns "?" 
and PostgresAdapter.getPlaceholder(N) returns "$N" + - **Validates: Requirements 1.5, 2.4, 3.3** + + - [ ]* 1.5 Write property test: Transaction Atomicity + - **Property 3: Transaction Atomicity** + - For any set of mutations in withTransaction, if callback resolves all mutations are committed; if callback rejects no mutations are visible + - **Validates: Requirements 7.1, 7.2** + +- [x] 2. Database Migration System Upgrade + - [x] 2.1 Refactor MigrationRunner for dialect-aware migrations + - Modify `backend/src/database/MigrationRunner.ts` to accept a DatabaseAdapter instead of sqlite3.Database + - Support dialect-specific files (`NNN_name.sqlite.sql`, `NNN_name.postgres.sql`) and shared files (`NNN_name.sql`) + - Select migration files matching the active dialect, skip already-applied migrations, track applied migrations in a `schema_migrations` table + - _Requirements: 5.1, 5.2, 5.3, 5.4, 5.5_ + + - [ ]* 2.2 Write property test: Migration Idempotency + - **Property 4: Migration Idempotency** + - Running runMigrations() multiple times on the same database produces the same schema state; already-applied migrations are skipped + - **Validates: Requirements 5.4, 5.5** + + - [ ]* 2.3 Write property test: Migration Dialect Selection + - **Property 5: Migration Dialect Selection** + - For any set of migration files with dialect-specific variants, MigrationRunner selects only files matching the active adapter's dialect + - **Validates: Requirement 5.3** + +- [x] 3. 
Migrate Existing Services to DatabaseAdapter + - [x] 3.1 Migrate UserService and AuthenticationService + - Replace `private db: Database` with `private db: DatabaseAdapter` in `backend/src/services/UserService.ts` + - Replace `runQuery/getQuery/allQuery` helpers with `db.execute/db.queryOne/db.query` calls + - Update constructor to accept DatabaseAdapter instead of sqlite3.Database + - Apply same migration to `backend/src/services/AuthenticationService.ts` + - _Requirements: 6.1, 6.4, 6.5_ + + - [x] 3.2 Migrate RoleService, GroupService, and PermissionService + - Replace sqlite3.Database with DatabaseAdapter in `backend/src/services/RoleService.ts`, `backend/src/services/GroupService.ts`, `backend/src/services/PermissionService.ts` + - Remove private runQuery/getQuery/allQuery helpers, use adapter methods directly + - _Requirements: 6.2, 6.3, 6.4, 6.5_ + + - [x] 3.3 Migrate SetupService, AuditLoggingService, and BatchExecutionService + - Replace sqlite3.Database with DatabaseAdapter in `backend/src/services/SetupService.ts`, `backend/src/services/AuditLoggingService.ts`, `backend/src/services/BatchExecutionService.ts` + - Remove private runQuery/getQuery/allQuery helpers, use adapter methods directly + - _Requirements: 6.4, 6.5_ + + - [x] 3.4 Migrate ExecutionRepository, PuppetRunHistoryService, and ReportFilterService + - Replace sqlite3.Database with DatabaseAdapter in `backend/src/database/ExecutionRepository.ts`, `backend/src/services/PuppetRunHistoryService.ts`, `backend/src/services/ReportFilterService.ts` + - Remove private runQuery/getQuery/allQuery helpers, use adapter methods directly + - _Requirements: 6.4, 6.5_ + + - [x] 3.5 Update DatabaseService and server.ts bootstrap + - Refactor `backend/src/database/DatabaseService.ts` to use AdapterFactory and DatabaseAdapter + - Update `backend/src/server.ts` to create adapter via `createDatabaseAdapter(config)`, call `initialize()` and `runMigrations()`, then pass adapter to all services + - Ensure all service 
constructors receive DatabaseAdapter instead of sqlite3.Database + - _Requirements: 6.4, 6.5_ + +- [x] 4. Checkpoint - Database abstraction complete + - Ensure all tests pass, ask the user if questions arise. + - Verify that the application starts with SQLite adapter and all existing functionality works through DatabaseAdapter. + +- [x] 5. RBAC Enhancements — New Permissions and Provisioner Role + - [x] 5.1 Create migration for new permissions and Provisioner role + - Create `backend/src/database/migrations/007_new_permissions_and_provisioner_role.sql` seeding: + - proxmox permissions: read, lifecycle, provision, destroy, admin + - aws permissions: read, lifecycle, provision, destroy, admin + - journal permissions: read, note, admin + - integration_config permissions: read, configure, admin + - Provisioner built-in role with appropriate permission assignments + - Assign all new permissions to the existing Administrator role + - Preserve all existing permissions and role assignments (backward compatible) + - _Requirements: 27.1, 27.2, 27.3, 27.4, 27.5, 28.1, 28.2, 28.3, 28.4, 28.5, 29.2, 29.3, 29.4_ + + - [x] 5.2 Update PermissionService with caching for new permission types + - Add TTL-based cache for permission check results in `backend/src/services/PermissionService.ts` + - Invalidate cache entries when permissions are added/removed from roles + - Ensure new action types (provision, destroy, lifecycle, configure, note, export) work in cache lookups + - Verify existing hasPermission checks (e.g., "proxmox" + "execute") continue to work + - _Requirements: 29.1, 29.2, 30.1, 30.2, 30.3_ + + - [x] 5.3 Write property test: Permission Monotonicity + - **Property 19: Permission Monotonicity** + - Adding a permission to a role never removes existing permissions; removing a permission never adds new ones + - **Validates: Requirement 29.2** + +- [x] 6. 
Journal Service — Data Model and Core Logic + - [x] 6.1 Create journal_entries migration and Zod schemas + - Create `backend/src/database/migrations/008_journal_entries.sql` with the journal_entries table: id, nodeId, nodeUri, eventType, source, action, summary, details (JSON), userId, timestamp + - Add indexes on nodeId, timestamp DESC, eventType, source, and composite (nodeId, timestamp DESC) + - Add CHECK constraints for eventType and source validation + - Create `backend/src/services/journal/types.ts` with JournalEntry, CreateJournalEntry, JournalEventType, JournalSource types and Zod schemas + - _Requirements: 25.1, 25.2, 25.3, 26.1, 26.2, 26.3, 26.4_ + + - [x] 6.2 Implement JournalService core (recordEvent, addNote, getNodeTimeline) + - Create `backend/src/services/journal/JournalService.ts` with: + - `recordEvent(entry)`: validate and insert a journal entry + - `addNote(nodeId, userId, content)`: create entry with eventType "note" and source "user" + - `getNodeTimeline(nodeId, options)`: query journal_entries with pagination + - `searchEntries(query, options)`: full-text search across summary and details + - Validate source against JournalSource enum (proxmox, aws, bolt, ansible, ssh, puppetdb, user, system) + - _Requirements: 22.1, 22.2, 22.3, 22.4, 24.1, 24.2, 24.3_ + + - [x] 6.3 Implement JournalService aggregateTimeline with live source merging + - Add `aggregateTimeline(nodeId, options)` to JournalService + - Fetch DB-stored events and live-source events (e.g., PuppetDB) in parallel + - Mark DB events with isLive=false, live events with isLive=true + - Sort merged results by timestamp descending, apply limit/offset pagination + - Gracefully skip failed live sources, returning DB events only + - _Requirements: 23.1, 23.2, 23.3, 23.4, 23.5_ + + - [ ]* 6.4 Write property test: Journal Timeline Sort Order + - **Property 11: Journal Timeline Sort Order** + - For any set of journal entries, aggregateTimeline returns entries sorted by timestamp descending + - 
**Validates: Requirement 23.3**
+
+  - [ ]* 6.5 Write property test: Journal Source Validation
+    - **Property 14: Journal Source Validation**
+    - For any string not in {proxmox, aws, bolt, ansible, ssh, puppetdb, user, system}, recording fails; for any value in the set, recording succeeds
+    - **Validates: Requirements 25.3, 26.3**
+
+  - [ ]* 6.6 Write property test: Journal isLive Flag Correctness
+    - **Property 12: Journal isLive Flag Correctness**
+    - DB-originated entries have isLive===false, live-fetched entries have isLive===true
+    - **Validates: Requirement 23.2**
+
+  - [ ]* 6.7 Write property test: Journal Pagination Bounds
+    - **Property 13: Journal Pagination Bounds**
+    - For any aggregation with limit L and offset O, result contains at most L entries
+    - **Validates: Requirement 23.5**
+
+- [x] 7. Integration Config Service — Storage, Encryption, and Merge
+  - [x] 7.1 Create integration_configs migration and types
+    - Create `backend/src/database/migrations/009_integration_configs.sql` with integration_configs table: id, userId, integrationName, config (JSON), isActive, createdAt, updatedAt
+    - Add UNIQUE(userId, integrationName) constraint, FK to users(id) ON DELETE CASCADE
+    - Add indexes on userId, integrationName, isActive
+    - Create `backend/src/services/IntegrationConfigService.types.ts` with IntegrationConfigRecord interface and Zod schemas
+    - _Requirements: 32.1, 32.2, 32.3, 32.4_
+
+  - [x] 7.2 Implement IntegrationConfigService with encryption
+    - Create `backend/src/services/IntegrationConfigService.ts` with:
+      - `saveConfig(userId, integrationName, config)`: validate against integration Zod schema, encrypt sensitive fields (matching patterns: *token*, *password*, *secret*, *key*, per Requirement 18.4) with AES-256-GCM using JWT_SECRET + per-record salt, upsert into DB
+      - `getConfig(userId, integrationName)`: retrieve and decrypt sensitive fields
+      - `deleteConfig(userId, integrationName)`: remove config record
+      - `listConfigs(userId)`: list all configs for a user
+      - 
`getActiveConfigs()`: retrieve all active configs (decrypted) + - _Requirements: 18.1, 18.2, 18.3, 18.4, 18.5, 20.1, 20.2, 20.3_ + + - [x] 7.3 Implement getEffectiveConfig and rotateEncryptionKey + - Add `getEffectiveConfig(integrationName)`: merge .env config as base with DB config overriding for non-null keys + - Add `rotateEncryptionKey(oldKey, newKey)`: re-encrypt all stored configs atomically within a transaction + - _Requirements: 19.1, 19.2, 19.3, 19.4, 20.4_ + + - [ ]* 7.4 Write property test: Encryption Round-Trip + - **Property 7: Encryption Round-Trip** + - For any string value v, decrypt(encrypt(v)) === v + - **Validates: Requirements 18.4, 18.5, 20.3** + + - [ ]* 7.5 Write property test: Config Merge Determinism + - **Property 6: Config Merge Determinism** + - For any .env config and DB config, getEffectiveConfig returns merged result where DB values override .env for overlapping non-null keys + - **Validates: Requirements 19.1, 19.2** + + - [ ]* 7.6 Write property test: Sensitive Field Encryption at Rest + - **Property 9: Sensitive Field Encryption at Rest** + - For any config with sensitive fields, raw DB value differs from plaintext after saveConfig + - **Validates: Requirement 18.4** + + - [ ]* 7.7 Write property test: Integration Config CRUD Round-Trip + - **Property 8: Integration Config CRUD Round-Trip** + - Saving then retrieving a config for the same user/integration returns equivalent config with sensitive fields decrypted + - **Validates: Requirement 18.1** + +- [x] 8. Checkpoint - Core services complete + - Ensure all tests pass, ask the user if questions arise. + - Verify RBAC migration seeds correctly, JournalService records and retrieves events, IntegrationConfigService encrypts/decrypts and merges configs. + +- [x] 9. 
AWS Plugin — Core Implementation + - [x] 9.1 Create AWSPlugin skeleton and types + - Create `backend/src/integrations/aws/` directory + - Create `backend/src/integrations/aws/types.ts` with AWS-specific types: AWSConfig, InstanceTypeInfo, AMIInfo, VPCInfo, SubnetInfo, SecurityGroupInfo, KeyPairInfo, AWSAuthenticationError + - Create `backend/src/integrations/aws/AWSPlugin.ts` extending BasePlugin, implementing ExecutionToolPlugin and InformationSourcePlugin with type="both" + - Register with IntegrationManager using name "aws" + - _Requirements: 8.1, 8.2, 8.3, 8.4_ + + - [x] 9.2 Implement AWS EC2 inventory and resource discovery + - Create `backend/src/integrations/aws/AWSService.ts` wrapping @aws-sdk/client-ec2 + - Implement `getInventory()`: list EC2 instances as Node objects with state, type, region, VPC, tags + - Implement `getGroups()`: group instances by region, VPC, and tags + - Implement `getNodeFacts(nodeId)`: return instance metadata as Facts + - Implement resource discovery: `getRegions`, `getInstanceTypes`, `getAMIs`, `getVPCs`, `getSubnets`, `getSecurityGroups`, `getKeyPairs` + - Add `@aws-sdk/client-ec2` and `@aws-sdk/client-sts` as dependencies + - _Requirements: 9.1, 9.2, 9.3, 9.4, 13.1, 13.2, 13.3, 13.4, 13.5, 13.6, 13.7_ + + - [x] 9.3 Implement AWS EC2 provisioning and lifecycle actions + - Implement `executeAction(action)` in AWSPlugin routing to: + - Provisioning: call EC2 runInstances, return ExecutionResult with instance ID on success + - Lifecycle: start, stop, reboot, terminate via corresponding EC2 APIs + - Record journal entry via JournalService on every action completion (success or failure) + - Throw AWSAuthenticationError on invalid/expired credentials + - _Requirements: 10.1, 10.2, 10.3, 10.4, 11.1, 11.2, 11.3, 11.4_ + + - [x] 9.4 Implement AWS health check + - Implement `performHealthCheck()` using STS GetCallerIdentity to validate credentials + - Return healthy with account details on success, unhealthy with "AWS authentication 
failed" on invalid credentials + - Ensure plugin continues accepting config updates when unhealthy + - _Requirements: 12.1, 12.2, 12.3_ + + - [ ]* 9.5 Write property test: AWS Node Field Completeness + - **Property 16: AWS Node Field Completeness** + - For any EC2 instance returned by getInventory, the Node includes state, type, region, VPC, and tags + - **Validates: Requirement 9.4** + + - [ ]* 9.6 Write property test: AWS Instance Grouping Correctness + - **Property 17: AWS Instance Grouping Correctness** + - Every instance appears in at least one group; no group contains instances that don't match its criterion + - **Validates: Requirement 9.2** + + - [ ]* 9.7 Write property test: Plugin Registration Uniqueness + - **Property 18: Plugin Registration Uniqueness** + - IntegrationManager never contains two plugins with the same name; duplicate registration throws + - **Validates: Requirements 31.1, 31.2** + +- [x] 10. Proxmox Enhancements — Compute Type Routing and UI Separation + - [x] 10.1 Enhance ProxmoxService with explicit compute type routing + - Add `createVM(node, params)` and `createLXC(node, params)` methods to `backend/src/integrations/proxmox/ProxmoxService.ts` with type-specific parameter validation + - Update `getInventory(computeType?)` to support optional "qemu" or "lxc" filter, include `computeType` field ("vm" | "lxc") on every inventory item + - Update `executeAction` to route lifecycle actions by determining guest type internally + - Update `getGroups()` to include type-based groups ("Proxmox VMs", "Proxmox Containers") + - _Requirements: 14.1, 14.2, 14.3, 14.4, 15.1, 15.2, 15.3, 15.4, 16.1, 16.2, 16.3, 16.4_ + + - [ ]* 10.2 Write property test: Proxmox Compute Type Partition + - **Property 15: Proxmox Compute Type Partition** + - Every inventory item has computeType "vm" or "lxc"; filtering by each produces disjoint sets whose union equals the full inventory + - **Validates: Requirements 14.3, 14.4, 16.1, 16.2, 16.3** + +- [x] 11. 
Checkpoint - Plugins complete + - Ensure all tests pass, ask the user if questions arise. + - Verify AWS plugin registers, discovers EC2 resources, and records journal events. Verify Proxmox compute type routing works for both VMs and containers. + +- [x] 12. Backend API Routes — Journal, Config, and AWS + - [x] 12.1 Create Journal API routes + - Create `backend/src/routes/journal.ts` with endpoints: + - `GET /api/journal/:nodeId` — get node timeline (calls aggregateTimeline) + - `POST /api/journal/:nodeId/notes` — add manual note (calls addNote) + - `GET /api/journal/search` — search journal entries + - Add auth middleware with permission checks: "journal:read" for GET, "journal:note" for POST + - Register routes in server.ts + - _Requirements: 22.4, 23.1, 24.1, 27.3_ + + - [x] 12.2 Create Integration Config API routes + - Create `backend/src/routes/integrationConfig.ts` with endpoints: + - `GET /api/config/integrations/:name` — get effective config + - `PUT /api/config/integrations/:name` — save config + - `DELETE /api/config/integrations/:name` — delete config + - `GET /api/config/integrations` — list user's configs + - Add auth middleware with permission checks: "integration_config:read" for GET, "integration_config:configure" for PUT/DELETE + - Register routes in server.ts + - _Requirements: 18.1, 19.1, 21.2, 27.4_ + + - [x] 12.3 Create AWS integration API routes + - Create `backend/src/routes/integrations/aws.ts` with endpoints: + - `GET /api/integrations/aws/inventory` — list EC2 instances + - `POST /api/integrations/aws/provision` — provision EC2 instance + - `POST /api/integrations/aws/lifecycle` — lifecycle actions (start/stop/reboot/terminate) + - `GET /api/integrations/aws/regions` — list regions + - `GET /api/integrations/aws/instance-types` — list instance types + - `GET /api/integrations/aws/amis` — list AMIs by region + - `GET /api/integrations/aws/vpcs` — list VPCs by region + - `GET /api/integrations/aws/subnets` — list subnets + - `GET 
/api/integrations/aws/security-groups` — list security groups + - `GET /api/integrations/aws/key-pairs` — list key pairs + - Add auth middleware with permission checks: "aws:read", "aws:provision", "aws:lifecycle" + - Register routes in server.ts + - _Requirements: 8.1, 9.1, 10.1, 11.1, 13.1-13.7, 27.2_ + +- [x] 13. AWS Plugin Registration and ConfigService Integration + - [x] 13.1 Register AWSPlugin in IntegrationManager and update ConfigService + - Update `backend/src/config/ConfigService.ts` to parse AWS configuration from environment variables (AWS_ENABLED, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_DEFAULT_REGION) + - Update `backend/src/config/schema.ts` to add AWS config to the Zod schema + - Update IntegrationManager setup in server.ts to register AWSPlugin when AWS is enabled + - Wire IntegrationConfigService.getEffectiveConfig into plugin initialization so DB configs override .env + - _Requirements: 8.2, 8.3, 8.4, 19.1_ + + - [ ]* 13.2 Write unit tests for AWS plugin registration + - Test AWSPlugin registers with name "aws" and type "both" + - Test IntegrationManager skips registration when AWS is disabled + - Test duplicate registration throws error + - _Requirements: 8.2, 8.3, 8.4, 31.1_ + +- [x] 14. Frontend — Proxmox UI Separation + - [x] 14.1 Split ProxmoxProvisionForm into VM and Container forms + - Create `frontend/src/components/ProxmoxVMProvisionForm.svelte` with QEMU-specific parameters (ISO images, CPU sockets, BIOS, machine type) + - Create `frontend/src/components/ProxmoxLXCProvisionForm.svelte` with LXC-specific parameters (OS templates, unprivileged flag, nesting) + - Both forms submit to the same "proxmox" backend plugin with appropriate computeType metadata + - Update `frontend/src/pages/ProvisionPage.svelte` to show a compute type selector (VM vs Container) that renders the correct form + - _Requirements: 17.1, 17.2, 17.3_ + +- [x] 15. 
Frontend — AWS Provisioning and Integration Config UI + - [x] 15.1 Create AWS provisioning UI + - Create `frontend/src/components/AWSProvisionForm.svelte` with EC2 parameters: region, instance type, AMI, VPC, subnet, security group, key pair + - Add AWS as a provisioning option in `frontend/src/pages/ProvisionPage.svelte` + - Create `frontend/src/components/AWSSetupGuide.svelte` for AWS configuration guidance + - Add API client functions in `frontend/src/lib/api.ts` for AWS endpoints + - _Requirements: 10.1, 13.1-13.7_ + + - [x] 15.2 Create Integration Config management pages + - Create `frontend/src/pages/IntegrationConfigPage.svelte` listing all registered integrations with config status + - Add per-integration config forms that call PUT /api/config/integrations/:name + - Mask sensitive field values by default in the UI, with reveal toggle + - Add navigation link in `frontend/src/components/Navigation.svelte` + - _Requirements: 21.1, 21.2, 21.3_ + +- [x] 16. Frontend — Journal Timeline UI + - [x] 16.1 Create Journal timeline component and integrate into NodeDetailPage + - Create `frontend/src/components/JournalTimeline.svelte` displaying a paginated, sorted timeline of events with isLive badges, source icons, and event type labels + - Create `frontend/src/components/JournalNoteForm.svelte` for adding manual notes + - Add API client functions in `frontend/src/lib/api.ts` for journal endpoints + - Integrate JournalTimeline into `frontend/src/pages/NodeDetailPage.svelte` as a new tab + - _Requirements: 23.1, 23.3, 24.1_ + +- [x] 17. 
Frontend — RBAC UI Updates + - [x] 17.1 Update role management UI for new permissions + - Update `frontend/src/components/RoleDetailDialog.svelte` to display and manage new permission resources (proxmox, aws, journal, integration_config) and actions (provision, destroy, lifecycle, configure, note) + - Update `frontend/src/lib/permissions.ts` with new permission types + - Show Provisioner role in `frontend/src/pages/RoleManagementPage.svelte` + - _Requirements: 27.1, 27.2, 27.3, 27.4, 28.1_ + +- [x] 18. Checkpoint - Frontend complete + - Ensure all tests pass, ask the user if questions arise. + - Verify Proxmox VM/LXC forms render correctly, AWS provisioning form works, journal timeline displays events, and RBAC UI shows new permissions. + +- [x] 19. Wire Journal Events into Plugins + - [x] 19.1 Integrate JournalService into ProxmoxIntegration and AWSPlugin + - Pass JournalService to ProxmoxIntegration and AWSPlugin constructors + - Record journal entries in ProxmoxService after every provisioning and lifecycle action (success or failure) with source "proxmox" + - Record journal entries in AWSPlugin after every provisioning and lifecycle action (success or failure) with source "aws" + - _Requirements: 10.4, 11.4, 22.1, 22.2, 22.3, 25.1, 25.2_ + + - [ ]* 19.2 Write property test: Journal Entry Completeness + - **Property 10: Journal Entry Completeness** + - For any completed action, exactly one journal entry is recorded with nodeId, nodeUri, eventType, source, action, summary, and timestamp + - **Validates: Requirements 10.4, 11.4, 22.1, 22.2, 22.3, 22.4** + +- [x] 20. 
Final Integration and Wiring + - [x] 20.1 Update server.ts with complete 1.0.0 bootstrap + - Ensure server.ts creates DatabaseAdapter via factory, runs migrations, and passes adapter to all services + - Instantiate JournalService, IntegrationConfigService with adapter + - Wire IntegrationConfigService effective configs into plugin initialization + - Register all new routes (journal, integrationConfig, aws) + - Ensure IntegrationManager enforces plugin name uniqueness (already does, verify) + - _Requirements: 4.1, 4.2, 6.4, 31.1, 31.2_ + +- [x] 21. Final checkpoint - Ensure all tests pass + - Ensure all tests pass, ask the user if questions arise. + - Verify end-to-end: database adapter works with SQLite, all services use adapter, AWS plugin registers and discovers, Proxmox routes VM/LXC correctly, journal records events from plugins, config storage encrypts/decrypts, RBAC enforces new permissions. + +## Notes + +- Tasks marked with `*` are optional and can be skipped for faster MVP +- Each task references specific requirements for traceability +- Checkpoints ensure incremental validation +- Property tests validate universal correctness properties from the design document +- Database abstraction (tasks 1-4) is foundational and must complete before other features +- The design uses TypeScript throughout — all implementations use TypeScript diff --git a/.kiro/steering/git-best-practices.md b/.kiro/steering/git-best-practices.md index 95d91ff0..29b17416 100644 --- a/.kiro/steering/git-best-practices.md +++ b/.kiro/steering/git-best-practices.md @@ -38,3 +38,49 @@ inclusion: always - Use environment variables for configuration - Review commits for sensitive information - Use signed commits when possible + +## Pre-Commit Hooks + +This project uses [pre-commit](https://pre-commit.com) with the following hooks that must pass before every commit: + +### File Quality + +- `trailing-whitespace` — strips trailing whitespace (excludes `.md`) +- `end-of-file-fixer` — ensures 
files end with a newline (excludes `.svg`) +- `mixed-line-ending` — enforces LF line endings +- `check-added-large-files` — blocks files over 1000KB +- `check-merge-conflict` — prevents committing merge conflict markers +- `check-case-conflict` — catches filename case collisions +- `check-yaml` / `check-json` — validates YAML and JSON syntax +- `no-commit-to-branch` — blocks direct commits to `main` / `master` + +### Security + +- `detect-private-key` — prevents committing private keys +- `detect-secrets` (Yelp) — scans for hardcoded secrets using a baseline file (`.secrets.baseline`) + - For false positives (e.g. variable names containing "password", "secret", "token"), add an inline `// pragma: allowlist secret` comment on the flagged line + - Excludes: `package-lock.json`, `.env.example`, `docs/`, test files, `e2e/` + +### Linting and Type Checking + +- `eslint` — runs `npm run lint` on JS/TS files +- `tsc-backend` — runs `tsc --noEmit` in `backend/` for TypeScript type checking +- `tsc-frontend` — runs `tsc --noEmit` in `frontend/` for TypeScript type checking +- `hadolint` — lints Dockerfiles (ignores DL3008, DL3009) +- `markdownlint` — lints and auto-fixes Markdown files +- `shellcheck` — checks shell scripts (severity: warning+) + +### Code Hygiene + +- `no-duplicate-files` — blocks files with suffixes like `_fixed`, `_clean`, `_backup`, `_old`, `_new`, `_copy`, `.bak` + +### Commit Messages + +- `conventional-pre-commit` — enforces [Conventional Commits](https://www.conventionalcommits.org/) format on commit messages + +### When Writing Code + +- Always ensure new code passes all pre-commit hooks before committing +- When `detect-secrets` flags a false positive, add `// pragma: allowlist secret` (or `# pragma: allowlist secret` for Python/YAML) as an inline comment on the flagged line +- Never disable or skip pre-commit hooks — fix the underlying issue instead +- Run `pre-commit run --all-files` to validate the entire codebase when in doubt diff --git 
a/.kiro/steering/product.md b/.kiro/steering/product.md new file mode 100644 index 00000000..4dd03bf1 --- /dev/null +++ b/.kiro/steering/product.md @@ -0,0 +1,33 @@ +--- +title: Product Summary +inclusion: always +--- + +## Pabawi + +Pabawi is a web-based command and control interface for infrastructure management. It provides a unified frontend for inventory browsing, remote execution, configuration inspection, and operations tracking across heterogeneous infrastructure tooling. + +### Core Capabilities + +- Multi-source inventory aggregation (Bolt, PuppetDB, Ansible, SSH, Proxmox, AWS) +- Ad-hoc command execution and Bolt task execution on remote nodes +- Ansible playbook execution +- Package management across infrastructure +- Execution history with re-execution support +- Node facts browsing from Puppet agents +- Puppet reports with metrics, resource changes, and run history +- Catalog inspection with cross-environment diff +- Hiera data browser with key usage analysis +- Real-time streaming output for command/task execution +- RBAC authentication with role-based permissions +- Proxmox and AWS VM/container provisioning +- Expert mode with full command lines and debug output +- Graceful degradation when individual integrations are unavailable + +### Target Users + +Infrastructure engineers and DevOps teams managing servers with Puppet/OpenVox, Bolt, Ansible, and SSH. Supports both Puppet Enterprise and Open Source Puppet/OpenVox. + +### Current Version + +v0.10.0 — Licensed under Apache 2.0. 
diff --git a/.kiro/steering/security-best-practices.md b/.kiro/steering/security-best-practices.md index 545be8a1..7bbe9889 100644 --- a/.kiro/steering/security-best-practices.md +++ b/.kiro/steering/security-best-practices.md @@ -10,6 +10,7 @@ inclusion: always - Validate all user inputs - Use parameterized queries to prevent SQL injection - Implement proper authentication and authorization +- Use `// pragma: allowlist secret` as an inline comment to allowlist false positives caught by `detect-secrets` pre-commit hook (use `# pragma: allowlist secret` for Python/YAML files) ## Dependency Management diff --git a/.kiro/steering/structure.md b/.kiro/steering/structure.md new file mode 100644 index 00000000..1a8f85a4 --- /dev/null +++ b/.kiro/steering/structure.md @@ -0,0 +1,80 @@ +--- +title: Project Structure +inclusion: always +--- + +## Top-Level Layout + +``` +pabawi/ +├── frontend/ # Svelte 5 SPA +├── backend/ # Express API server +├── e2e/ # Playwright E2E tests +├── docs/ # User-facing documentation +├── scripts/ # Setup, build, and deployment scripts +├── samples/ # Sample configs and stress tests +├── .kiro/ # AI-generated docs, specs, steering, hooks +├── package.json # Root workspace config +├── docker-compose.yml # Container orchestration +├── eslint.config.js # Shared ESLint config +└── playwright.config.ts # E2E test config +``` + +## Backend (`backend/src/`) + +``` +src/ +├── server.ts # Express app entry point +├── config/ # ConfigService + Zod schema for env validation +├── database/ # DB adapters (SQLite/Postgres), migrations, repositories +├── errors/ # Centralized error handling service +├── integrations/ # Plugin architecture for external tools +│ ├── BasePlugin.ts # Abstract plugin base class +│ ├── IntegrationManager.ts # Plugin registry and lifecycle +│ ├── NodeLinkingService.ts # Cross-integration node identity linking +│ ├── bolt/ # Puppet Bolt integration +│ ├── puppetdb/ # PuppetDB queries (with circuit breaker + retry) +│ ├── 
puppetserver/ # Puppetserver API integration +│ ├── hiera/ # Hiera data parsing and resolution +│ ├── ansible/ # Ansible inventory and playbook execution +│ ├── ssh/ # Direct SSH execution (connection pool) +│ ├── aws/ # AWS EC2 provisioning +│ └── proxmox/ # Proxmox VM/LXC provisioning +├── middleware/ # Auth, RBAC, security, error handler, expert mode +├── routes/ # Express route handlers (one file per domain) +│ └── integrations/ # Integration-specific route handlers +├── services/ # Business logic (auth, groups, roles, execution, etc.) +│ └── journal/ # Node journal/notes service +├── utils/ # Shared helpers (API responses, caching, passwords) +└── validation/ # Zod schemas and command whitelist validation +``` + +## Frontend (`frontend/src/`) + +``` +src/ +├── main.ts # App entry point +├── App.svelte # Root component +├── app.css # Global styles (Tailwind) +├── components/ # UI components (flat, one file per component) +├── pages/ # Page-level components (route targets) +├── lib/ # Shared utilities, stores, and types +│ ├── api.ts # Backend API client +│ ├── auth.svelte.ts # Auth state (Svelte runes) +│ ├── router.svelte.ts # Client-side routing +│ ├── validation.ts # Input validation +│ ├── types/ # TypeScript type definitions +│ └── *.svelte.ts # Reactive stores (expert mode, theme, toast, etc.) +└── __tests__/ # Shared test utilities and generators +``` + +## Key Patterns + +- Integration plugins extend `BasePlugin` and register with `IntegrationManager` +- Each integration has its own directory with Plugin, Service, and types files +- Routes delegate to services; services use integrations or repositories +- Frontend components are flat in `components/` (no nested folders) +- Svelte runes files use `.svelte.ts` extension for reactive state +- Tests are co-located with source (`*.test.ts`) or in `__tests__/` directories +- Database migrations are sequential SQL files (`000_`, `001_`, etc.) 
+- Configuration flows through `ConfigService` backed by `backend/.env` diff --git a/.kiro/steering/tech.md b/.kiro/steering/tech.md new file mode 100644 index 00000000..a205baef --- /dev/null +++ b/.kiro/steering/tech.md @@ -0,0 +1,90 @@ +--- +title: Tech Stack +inclusion: always +--- + +## Language & Runtime + +- TypeScript (strict mode) across frontend and backend +- Node.js 20+ runtime +- ES2022 target for both frontend and backend + +## Frontend + +- Svelte 5 with Vite 7 bundler +- TailwindCSS 3 for styling (PostCSS + Autoprefixer) +- Client-side routing (custom `router.svelte.ts`) +- Svelte runes for state management (`*.svelte.ts` files) +- Dev server on port 5173, proxies `/api` to backend on port 3000 + +## Backend + +- Express 4 HTTP server with TypeScript +- `tsx` for development (watch mode) +- `tsc` for production builds (CommonJS output to `dist/`) +- SQLite3 (primary) and PostgreSQL (optional) via adapter pattern +- SQL migrations in `src/database/migrations/` +- Zod for request validation +- JWT (jsonwebtoken) for authentication +- bcrypt for password hashing +- ssh2 for SSH connections +- helmet + cors + express-rate-limit for security +- dotenv for configuration (`backend/.env`) + +## Monorepo Structure + +- npm workspaces (`frontend`, `backend`) +- Root `package.json` orchestrates cross-workspace scripts +- Shared ESLint config at root (`eslint.config.js`) +- `typescript-eslint` strict + stylistic type-checked rules +- Key ESLint rules: `explicit-function-return-type`, `no-explicit-any`, `consistent-type-imports` + +## Testing + +- Vitest for unit tests (both frontend and backend) +- Backend tests: `environment: 'node'`, files in `test/` and `src/integrations/*/__tests__/` +- Frontend tests: `environment: 'jsdom'`, co-located `*.test.ts` files +- fast-check for property-based testing +- supertest for HTTP endpoint testing +- @testing-library/svelte for component tests +- Playwright for E2E tests (`e2e/` directory, Chromium, base URL 
`http://localhost:3000`) + +## Pre-commit + +- pre-commit framework with conventional commits enforcement +- ESLint, tsc (both workspaces), hadolint, markdownlint, shellcheck +- detect-secrets with `.secrets.baseline` + +## Common Commands + +```bash +# Install all dependencies +npm run install:all + +# Development +npm run dev:frontend # Vite dev server (port 5173) +npm run dev:backend # tsx watch (port 3000) +npm run dev:fullstack # Build frontend, copy to backend/public, run backend + +# Build +npm run build # Build frontend + copy + build backend + +# Test +npm test # Run backend + frontend unit tests (vitest --run) +npm run test:e2e # Playwright E2E tests + +# Lint +npm run lint # ESLint both workspaces +npm run lint:fix # ESLint autofix both workspaces + +# Pre-commit +npm run precommit # Run all pre-commit hooks +npm run setup:hooks # Install pre-commit hooks +``` + +## Deployment + +- Docker support with multi-stage builds (Dockerfile, Dockerfile.alpine, Dockerfile.ubuntu) +- docker-compose.yml for container orchestration +- Frontend built and copied into `backend/public/` for single-server serving +- Health check endpoint at `/api/health` diff --git a/.kiro/summaries/pabawi-workspace-analysis.md b/.kiro/summaries/pabawi-workspace-analysis.md new file mode 100644 index 00000000..e5f7a398 --- /dev/null +++ b/.kiro/summaries/pabawi-workspace-analysis.md @@ -0,0 +1,642 @@ +# Pabawi Workspace Analysis + +**Date**: 2025-02-27 +**Version**: 0.10.0 +**Purpose**: Comprehensive overview of Pabawi application structure, technology stack, and infrastructure setup for Azure integration planning + +## Executive Summary + +Pabawi is a **unified web interface for infrastructure management and remote execution** that aggregates multiple automation tools (Puppet, Bolt, Ansible, PuppetDB, Puppetserver, Hiera) through a plugin-based architecture. 
It's a **full-stack Node.js application** with a Svelte 5 frontend and Express backend, currently deployed via Docker with **no dedicated cloud provider plugin** (AWS SDK dependencies exist for partial EC2/STS support — see Section 4 — but no Azure or GCP integration). + +--- + +## 1. Application Type & Purpose + +### What is Pabawi? + +**Type**: Web application (SPA + REST API backend) + +**Core Function**: + +- Unified command & control interface for infrastructure automation +- Multi-source inventory aggregation (Bolt, PuppetDB, Ansible, SSH) +- Remote command/task execution with real-time streaming +- Node facts, Puppet reports, catalog inspection, Hiera data browsing +- Execution history tracking and re-execution capability + +**Key Features**: + +- Multi-source inventory from Bolt, PuppetDB, Ansible, SSH +- Ad-hoc command execution with whitelist security +- Bolt task execution with parameter discovery +- Package management across infrastructure +- Real-time streaming output +- Expert mode with full command lines and debug output +- Graceful degradation when integrations unavailable + +--- + +## 2. 
Technology Stack + +### Frontend + +| Component | Technology | Version | +|-----------|-----------|---------| +| Framework | Svelte | 5.0.0 | +| Build Tool | Vite | 7.2.2 | +| Styling | Tailwind CSS | 3.4.3 | +| Testing | Vitest | 4.0.8 | +| Testing Library | @testing-library/svelte | 5.0.0 | +| Language | TypeScript | 5.4.5 | +| Linting | ESLint | 9.39.1 | + +**Frontend Structure**: + +``` +frontend/ +├── src/ +│ ├── components/ # UI components +│ ├── pages/ # Page components +│ └── lib/ # Utilities and stores +├── package.json +├── vite.config.ts +├── tailwind.config.js +└── tsconfig.json +``` + +### Backend + +| Component | Technology | Version | +|-----------|-----------|---------| +| Runtime | Node.js | 20.x | +| Framework | Express | 4.19.2 | +| Language | TypeScript | 5.4.5 | +| Database | SQLite3 | 5.1.7 | +| Database (Alt) | PostgreSQL | 8.13.0 | +| Authentication | JWT | 9.0.2 | +| Password Hashing | bcrypt | 5.1.1 | +| Security | Helmet | 8.1.0 | +| Rate Limiting | express-rate-limit | 8.2.1 | +| CORS | cors | 2.8.5 | +| SSH | ssh2 | 1.17.0 | +| AWS SDK | @aws-sdk/client-ec2, @aws-sdk/client-sts | 3.700.0 | +| Config | dotenv | 16.4.5 | +| Validation | zod | 3.23.8 | +| YAML | yaml | 2.8.2 | +| Testing | Vitest | 4.0.9 | +| Testing | supertest | 7.0.0 | +| Dev Runtime | tsx | 4.7.2 | + +**Backend Structure**: + +``` +backend/ +├── src/ +│ ├── config/ # Configuration service +│ ├── database/ # SQLite/PostgreSQL adapters +│ ├── integrations/ # Plugin architecture +│ │ ├── bolt/ # Bolt plugin +│ │ ├── puppetdb/ # PuppetDB integration +│ │ ├── puppetserver/ # Puppetserver integration +│ │ ├── hiera/ # Hiera integration +│ │ ├── ansible/ # Ansible plugin +│ │ ├── ssh/ # SSH plugin +│ │ ├── proxmox/ # Proxmox integration +│ │ └── types.ts # Plugin interfaces +│ ├── routes/ # API endpoints +│ ├── services/ # Business logic +│ └── server.ts # Express app entry +├── test/ # Unit/integration tests +├── package.json +└── tsconfig.json +``` + +### Key Dependencies + 
+**AWS Integration** (Already Present): + +- `@aws-sdk/client-ec2` - EC2 operations +- `@aws-sdk/client-sts` - STS operations +- Indicates some AWS support already exists + +**Database Support**: + +- SQLite3 (default, file-based) +- PostgreSQL (alternative, for production) +- Adapter pattern allows easy addition of other databases + +--- + +## 3. Deployment & Infrastructure + +### Docker Configuration + +**Dockerfile Strategy**: + +- Multi-stage build (frontend builder → backend builder → production image) +- Base image: Ubuntu 24.04 (production) +- Includes: Node.js 20, Puppet/OpenVox, Bolt, Ansible, Ruby +- Non-root user: UID 1001 (pabawi) +- Health check: HTTP GET to `/api/health` +- Exposed port: 3000 + +**Docker Compose**: + +- Single service deployment +- Volume mounts for: + - Bolt project (read-only) + - SQLite database persistence + - SSL certificates (optional) + - Hiera control repository (optional) +- Network: pabawi-network (bridge) +- Restart policy: unless-stopped +- Health checks enabled + +**Alternative Dockerfiles**: + +- `Dockerfile.alpine` - Alpine Linux base (smaller) +- `Dockerfile.ubuntu` - Explicit Ubuntu base + +**Multi-Architecture Support**: + +- Builds for linux/amd64 and linux/arm64 +- Uses docker buildx for cross-platform builds + +### Kubernetes Deployment + +**Kubernetes Support** (Documented): + +- Deployment manifest with 1 replica (SQLite limitation) +- ConfigMap for environment variables +- PersistentVolumeClaim for database storage +- Service for exposure +- Secrets for SSH keys and SSL certificates +- Liveness and readiness probes + +**Limitation**: Single replica due to SQLite database (not shared) + +### CI/CD Pipeline + +**GitHub Actions Workflows**: + +1. 
**CI Workflow** (`.github/workflows/ci.yml`): + - Triggers: PR to main/develop, push to main/develop + - Node 20.x matrix + - Steps: + - ESLint linting + - TypeScript type checking (backend & frontend) + - Unit tests (vitest) + - Backend build + - Frontend build + - Docker image build test (no push) + +2. **Publish Workflow** (`.github/workflows/publish.yml`): + - Triggers: Git tags matching `v*.*.*` + - Builds and pushes to GitHub Container Registry (ghcr.io) + - Multi-architecture builds (amd64, arm64) + - Semantic versioning tags + - Artifact attestation + - Automatic GitHub Release creation from CHANGELOG.md + +**Registry**: GitHub Container Registry (ghcr.io) +**Image Name**: `ghcr.io/example42/pabawi` + +--- + +## 4. Current Cloud/Infrastructure Integration + +### Existing Cloud Support + +**AWS Integration** (Partial): + +- AWS SDK dependencies present (`@aws-sdk/client-ec2`, `@aws-sdk/client-sts`) +- Indicates some AWS EC2/STS functionality exists +- **No dedicated AWS plugin found** in integrations directory +- Likely used for specific features, not as primary integration + +**No Other Cloud Providers**: + +- No Azure SDK dependencies +- No GCP SDK dependencies +- No Terraform/CloudFormation/Bicep files +- No infrastructure-as-code configuration + +### Current Integrations (Non-Cloud) + +**Execution Tools**: + +1. **Bolt** - Puppet's execution tool (primary) +2. **Ansible** - Configuration management +3. **SSH** - Direct SSH connections + +**Information Sources**: + +1. **PuppetDB** - Puppet infrastructure data +2. **Puppetserver** - Puppet node management +3. **Hiera** - Hierarchical configuration data +4. 
**Proxmox** - Virtualization platform (partial) + +**Plugin Architecture**: + +- Base interface: `IntegrationPlugin` +- Types: ExecutionToolPlugin, InformationSourcePlugin +- All plugins extend `BasePlugin` +- Plugin registration via `IntegrationManager` +- Health check system with caching +- Node linking service for multi-source aggregation + +--- + +## 5. Configuration & Environment + +### Configuration Management + +**ConfigService** (`backend/src/config/ConfigService.ts`): + +- Loads from environment variables +- Parses integration-specific configs +- Validates configuration on startup +- Supports multiple integrations simultaneously + +**Environment Variables** (from `.env.docker`): + +**Core Settings**: + +``` +PORT=3000 +HOST=localhost +LOG_LEVEL=info +DATABASE_PATH=/pabawi/data/pabawi.db +``` + +**Bolt Integration**: + +``` +COMMAND_WHITELIST_ALLOW_ALL=false +COMMAND_WHITELIST=["ls","pwd","whoami","uptime"] +BOLT_EXECUTION_TIMEOUT=300000 +BOLT_PROJECT_PATH=/pabawi/control-repo +``` + +**PuppetDB Integration**: + +``` +PUPPETDB_ENABLED=true +PUPPETDB_SERVER_URL=https://puppet.example.com +PUPPETDB_PORT=8081 +PUPPETDB_TOKEN= +PUPPETDB_SSL_ENABLED=true +PUPPETDB_SSL_CA=/pabawi/certs/ca.pem +PUPPETDB_SSL_CERT=/pabawi/certs/pabawi.pem +PUPPETDB_SSL_KEY=/pabawi/certs/pabawi-key.pem +PUPPETDB_SSL_REJECT_UNAUTHORIZED=true +``` + +**Puppetserver Integration**: + +``` +PUPPETSERVER_ENABLED=true +PUPPETSERVER_SERVER_URL=https://puppet.example.com +PUPPETSERVER_PORT=8140 +PUPPETSERVER_TOKEN= +PUPPETSERVER_SSL_ENABLED=true +PUPPETSERVER_SSL_CA=/pabawi/certs/ca.pem +PUPPETSERVER_SSL_CERT=/pabawi/certs/pabawi.pem +PUPPETSERVER_SSL_KEY=/pabawi/certs/pabawi-key.pem +PUPPETSERVER_SSL_REJECT_UNAUTHORIZED=true +``` + +**Hiera Integration**: + +``` +HIERA_ENABLED=true +HIERA_CONTROL_REPO_PATH=/pabawi/control-repo +HIERA_CONFIG_PATH=hiera.yaml +``` + +**Ansible Integration**: + +``` +ANSIBLE_ENABLED=true +ANSIBLE_PROJECT_PATH=/pabawi/ansible 
+ANSIBLE_INVENTORY_PATH=inventory/hosts +``` + +**SSH Integration**: + +``` +SSH_ENABLED=true +SSH_CONFIG_PATH=/pabawi/ssh/config +``` + +### Database Configuration + +**SQLite** (Default): + +- File-based database +- Path: `DATABASE_PATH` environment variable +- Single-file storage +- Limitation: Not suitable for multi-replica deployments + +**PostgreSQL** (Alternative): + +- Connection string via environment variable +- Adapter pattern in `backend/src/database/` +- Supports production deployments with multiple replicas + +--- + +## 6. Project Structure & Key Files + +### Root Level + +``` +pabawi/ +├── frontend/ # Svelte 5 frontend +├── backend/ # Node.js/Express backend +├── docs/ # User-facing documentation +├── e2e/ # Playwright end-to-end tests +├── scripts/ # Setup and utility scripts +├── .github/workflows/ # CI/CD pipelines +├── .devcontainer/ # Dev container config +├── Dockerfile # Production Docker image +├── Dockerfile.alpine # Alpine variant +├── Dockerfile.ubuntu # Ubuntu variant +├── docker-compose.yml # Docker Compose config +├── package.json # Root workspace config +├── .env.docker # Docker environment template +├── .env.example # Environment template +├── .pre-commit-config.yaml # Pre-commit hooks +└── README.md # Main documentation +``` + +### Documentation Files (Relevant for Azure) + +- `docs/docker-deployment.md` - Docker deployment guide +- `docs/kubernetes-deployment.md` - Kubernetes deployment guide +- `docs/configuration.md` - Configuration reference +- `docs/architecture.md` - System architecture +- `docs/integrations-api.md` - Integration plugin API + +### Scripts + +- `scripts/setup.sh` - Interactive setup script +- `scripts/docker-build-multiarch.sh` - Multi-arch Docker build +- `scripts/docker-run.sh` - Docker run helper +- `scripts/docker-run.ps1` - PowerShell Docker run helper +- `scripts/generate-pabawi-cert.sh` - SSL certificate generation + +--- + +## 7. 
Database Architecture + +### Current Database Support + +**SQLite3** (Default): + +- File-based, single-file database +- Located at `DATABASE_PATH` (default: `/data/pabawi.db`) +- Suitable for single-instance deployments +- Limitation: Database locking with multiple replicas + +**PostgreSQL** (Alternative): + +- Connection string: `DATABASE_URL` environment variable +- Suitable for production multi-replica deployments +- Adapter pattern allows easy switching + +### Database Adapter Pattern + +**Files**: + +- `backend/src/database/DatabaseAdapter.ts` - Interface +- `backend/src/database/SQLiteAdapter.ts` - SQLite implementation +- `backend/src/database/PostgresAdapter.ts` - PostgreSQL implementation +- `backend/src/database/AdapterFactory.ts` - Factory for creating adapters + +**Migrations**: + +- `backend/src/database/MigrationRunner.ts` - Migration system +- Migrations stored in `backend/src/database/migrations/` +- Automatic migration on startup + +--- + +## 8. Security Features + +### Authentication & Authorization + +- JWT-based authentication +- bcrypt password hashing +- Role-based access control (RBAC) framework +- Token-based API authentication + +### Network Security + +- Helmet.js for HTTP headers +- CORS configuration +- Rate limiting via express-rate-limit +- SSL/TLS support for integrations +- Certificate validation options + +### Secrets Management + +- Environment variables for sensitive data +- SSL certificates stored on filesystem +- SSH keys managed via SSH config +- No hardcoded secrets in code + +### Pre-Commit Hooks + +- detect-secrets scanning +- Private key detection +- Conventional commit enforcement +- ESLint and TypeScript type checking + +--- + +## 9. 
Testing & Quality Assurance + +### Testing Framework + +- **Unit Tests**: Vitest +- **Integration Tests**: Vitest + supertest +- **E2E Tests**: Playwright +- **Test Coverage**: Property-based testing with fast-check + +### Linting & Type Checking + +- ESLint for code quality +- TypeScript strict mode +- Separate type checking for backend and frontend +- Pre-commit hooks enforce standards + +### CI/CD Quality Gates + +- All tests must pass +- ESLint must pass with zero warnings +- TypeScript type checking must pass +- Docker image must build successfully + +--- + +## 10. Deployment Patterns + +### Local Development + +```bash +npm run dev:backend # Port 3000 +npm run dev:frontend # Port 5173 +npm run dev:fullstack # Port 3000 (full-stack) +``` + +### Docker Deployment + +```bash +docker run -d \ + --name pabawi \ + -p 127.0.0.1:3000:3000 \ + -v $(pwd)/bolt-project:/bolt-project:ro \ + -v $(pwd)/data:/data \ + --env-file .env \ + example42/pabawi:latest +``` + +### Docker Compose + +```bash +docker-compose up -d +``` + +### Kubernetes + +- Single replica deployment (SQLite limitation) +- ConfigMap for configuration +- PersistentVolumeClaim for data +- Service for exposure +- Liveness/readiness probes + +--- + +## 11. Key Observations for Azure Integration + +### Strengths + +1. **Plugin Architecture**: Easy to add new integrations (including Azure) +2. **Database Abstraction**: Adapter pattern allows Azure SQL/Cosmos DB support +3. **Environment-Based Config**: No code changes needed for cloud configuration +4. **Docker-Ready**: Already containerized for cloud deployment +5. **Kubernetes Support**: Can run on AKS +6. **Multi-Architecture**: Supports both amd64 and arm64 + +### Considerations for Azure + +1. **Database**: Currently SQLite (single-instance). PostgreSQL or Azure SQL recommended for production +2. **Storage**: Volume mounts for data. Azure Storage or managed disks needed for cloud +3. **Secrets**: Environment variables. 
Azure Key Vault integration recommended +4. **Authentication**: JWT-based. Azure AD/Entra ID integration possible +5. **Monitoring**: No built-in cloud monitoring. Application Insights integration needed +6. **Networking**: Currently localhost-only. Azure networking/security groups needed +7. **CI/CD**: GitHub Actions to Azure DevOps or GitHub Actions with Azure deployment + +### Existing AWS SDK + +- AWS SDK already present (`@aws-sdk/client-ec2`, `@aws-sdk/client-sts`) +- Suggests pattern for adding Azure SDK +- Could indicate existing AWS integration to reference + +--- + +## 12. Roadmap & Future Considerations + +### Planned Features + +- Tiny Puppet integration +- Scheduled executions +- Custom dashboards +- Audit logging +- CLI tool + +### Under Evaluation + +- Terraform integration +- AWS/Azure CLI integration +- Kubernetes integration +- Choria integration +- Icinga integration + +### Potential Azure Integrations + +- Azure VMs (similar to AWS EC2) +- Azure Automation +- Azure DevOps +- Azure Key Vault (secrets) +- Application Insights (monitoring) +- Azure SQL Database +- Azure Container Registry +- Azure Kubernetes Service (AKS) + +--- + +## 13. 
Files Relevant for Azure Integration + +### Configuration + +- `backend/src/config/ConfigService.ts` - Add Azure config parsing +- `.env.example` - Add Azure environment variables +- `docs/configuration.md` - Document Azure settings + +### Database + +- `backend/src/database/AdapterFactory.ts` - Add Azure SQL adapter +- `backend/src/database/` - Create AzureSqlAdapter.ts + +### Integrations + +- `backend/src/integrations/types.ts` - Plugin interfaces +- `backend/src/integrations/` - Create azure/ subdirectory for Azure plugin + +### Deployment + +- `Dockerfile` - May need Azure-specific dependencies +- `docker-compose.yml` - Azure deployment variant +- `docs/kubernetes-deployment.md` - AKS deployment guide +- `.github/workflows/` - Azure deployment workflow + +### Documentation + +- `docs/` - Create azure-deployment.md +- `docs/integrations/` - Create azure.md for Azure integration setup + +--- + +## 14. Summary Table + +| Aspect | Current State | Notes | +|--------|---------------|-------| +| **Application Type** | Full-stack web app | Svelte frontend + Express backend | +| **Frontend Framework** | Svelte 5 | Modern, reactive framework | +| **Backend Runtime** | Node.js 20 | TypeScript, Express | +| **Database** | SQLite (default) | PostgreSQL alternative available | +| **Deployment** | Docker + Kubernetes | Multi-architecture support | +| **CI/CD** | GitHub Actions | Publishes to ghcr.io | +| **Cloud Providers** | None (AWS SDK present) | No Azure, GCP, or Terraform | +| **Plugin Architecture** | Yes | Extensible integration system | +| **Authentication** | JWT + bcrypt | RBAC framework exists | +| **Testing** | Vitest + Playwright | Unit, integration, E2E | +| **Security** | Helmet, rate limiting, SSL | Pre-commit hooks with secret detection | +| **Documentation** | Comprehensive | Docker, Kubernetes, integrations | +| **Monitoring** | Health checks only | No cloud monitoring integration | + +--- + +## Recommendations for Azure Integration + +1. 
**Start with Azure VM Integration**: Similar to existing AWS EC2 support +2. **Add Azure SQL Database Adapter**: For production deployments +3. **Implement Azure Key Vault**: For secrets management +4. **Create AKS Deployment Guide**: For Kubernetes deployments +5. **Add Azure DevOps CI/CD**: Alternative to GitHub Actions +6. **Integrate Application Insights**: For monitoring and logging +7. **Support Azure AD/Entra ID**: For authentication +8. **Create Azure Storage Integration**: For persistent volumes diff --git a/.kiro/todo/REMAINING_TODOS_REPORT.md b/.kiro/todo/REMAINING_TODOS_REPORT.md new file mode 100644 index 00000000..79097a21 --- /dev/null +++ b/.kiro/todo/REMAINING_TODOS_REPORT.md @@ -0,0 +1,289 @@ +# Pabawi Remaining TODOs - Prioritized Report + +Generated: March 11, 2026 + +## Completed Items (Moved to done/) + +- ✅ Node Linking Redesign - Backend implementation complete +- ✅ Database Schema Cleanup - Migration-first approach implemented +- ✅ Provisioning Endpoint Fix - Backend endpoint created and working +- ✅ Default User Permissions Fix - Viewer role auto-assignment implemented +- ✅ Proxmox SSL Fix - Environment variable configuration working +- ✅ Batch Execution Missing Action - executeAction method added +- ✅ Docker Missing Schema Files - Dockerfile updated to copy database directory + +--- + +## HIGH PRIORITY + +### 1. Test Failures Analysis (47 remaining failures) + +**File**: `test-failures-analysis.md` +**Impact**: Blocking CI/CD, test suite reliability +**Effort**: Medium (2-3 hours) + +**Remaining Issues**: + +- User Roles Tests: Extra viewer role causing count mismatches (~17 failures) +- RBAC Middleware Logging: Log format doesn't match expectations (2 failures) +- SSH Plugin Test: Node not found in inventory (1 failure) +- Property Test: `__proto__` obfuscation returns undefined (1 failure) +- Brute Force Test: SQL syntax error (1 failure) +- Batch Execution Tests: Logic issues (2-3 failures) + +**Next Steps**: + +1. 
Fix users.test.ts role assignment expectations (highest impact) +2. Update RBAC logging test expectations +3. Fix remaining edge cases + +--- + +### 2. RBAC Test Failures (115 failures - Error Format Mismatch) + +**File**: `rbac-test-failures.md` +**Impact**: Test suite reliability, API consistency +**Effort**: Low (1-2 hours) + +**Issue**: Tests expect simple string errors but implementation returns structured error objects. + +**Recommended Fix**: Update test assertions to match structured error format: + +```javascript +// Change from: +expect(response.body.error).toBe('Unauthorized'); +// To: +expect(response.body.code).toBe('UNAUTHORIZED'); +expect(response.body.message).toBeDefined(); +``` + +**Affected Files**: + +- `test/routes/groups.test.ts` (10 failures) +- `test/routes/roles-permissions.test.ts` (2 failures) +- `test/routes/users.test.ts` (33 failures) +- Integration tests (6 failures - unrelated ansible integration) + +--- + +### 3. Auth Test Database Lifecycle (67 failures) + +**File**: `auth-test-database-lifecycle.md` +**Impact**: Test infrastructure, not blocking production +**Effort**: Medium (2-3 hours) + +**Issue**: `SQLITE_MISUSE: Database is closed` errors due to async operations running after database closes. + +**Recommended Solution**: Use single database per test suite instead of per-test: + +```typescript +beforeAll(async () => { + db = new Database(':memory:'); + await initializeSchema(db); +}); + +afterAll(async () => { + await closeDatabase(db); +}); + +beforeEach(async () => { + await clearTestData(db); +}); +``` + +--- + +## MEDIUM PRIORITY + +### 4. Environment Configuration Issues + +**File**: `env-configuration-issues.md` +**Impact**: Configuration clarity, potential confusion +**Effort**: Low (30 minutes) + +**Issues**: + +- `STREAMING_BUFFER_SIZE=1024` should be `STREAMING_BUFFER_MS=100` +- Unused priority variables: `BOLT_PRIORITY`, `PUPPETDB_PRIORITY` +- Missing documentation in `.env.example` + +**Actions**: + +1. 
Fix variable name in `.env` +2. Remove or implement priority variables +3. Update `.env.example` + +--- + +### 5. Inventory Multiple Source Tags Bug + +**File**: `inventory-multiple-source-tags-bug.md` +**Impact**: User experience, visibility of multi-source nodes +**Effort**: Medium (2-3 hours) + +**Issue**: `puppet.office.lab42` only shows "PuppetDB" tag but should also show "Bolt" tag. + +**Investigation Needed**: + +1. Check identifier extraction for this node from both sources +2. Verify both sources return this node +3. Debug node linking process +4. Test `/api/inventory` endpoint + +--- + +### 6. Expert Mode Prototype Pollution + +**File**: `expert-mode-prototype-pollution.md` +**Impact**: Security vulnerability (not actively exploited) +**Effort**: Low (1 hour) + +**Issue**: Property-based test reveals metadata handling doesn't sanitize dangerous property names like `__proto__`, `constructor`, `prototype`. + +**Fix**: + +```typescript +const DANGEROUS_KEYS = ['__proto__', 'constructor', 'prototype']; + +addMetadata(debugInfo: DebugInfo, key: string, value: unknown): void { + if (DANGEROUS_KEYS.includes(key)) { + return; // or sanitize + } + debugInfo.metadata[key] = value; +} +``` + +--- + +### 7. Proxmox Restart Required + +**File**: `proxmox-restart-required.md` +**Impact**: Deployment issue (one-time fix) +**Effort**: Minimal (restart server) + +**Issue**: Server running cached code with old undici import. + +**Solution**: Restart backend server to pick up updated code. + +--- + +## LOW PRIORITY + +### 8. 
Docker Improvements + +**File**: `docker-improvements.md` +**Impact**: Build optimization, security hardening +**Effort**: Medium (3-4 hours) + +**High Priority Items**: + +- Generate package-lock.json files for deterministic builds +- Add image metadata (LABEL instructions) +- Install only production dependencies in final stage + +**Medium Priority**: + +- Optimize image size (currently 440MB) +- Enhance .dockerignore + +**Low Priority**: + +- Build optimization with BuildKit cache +- Multi-platform support +- Security scanning automation + +--- + +### 9. Hiera Classification Mode Toggle + +**File**: `hiera-classification-mode-toggle.md` +**Impact**: Enhancement feature +**Effort**: Medium (2-3 hours) + +**Status**: Frontend UI implemented, backend needs work. + +**Backend Changes Needed**: + +1. Add `classificationMode` query parameter to API +2. Update `HieraService.classifyKeyUsage()` with mode parameter +3. Implement both classification strategies (found vs class-matched) + +**Dependencies**: Requires fixing class detection first. + +--- + +### 10. Proxmox Not Initialized Issue + +**File**: `proxmox-not-initialized-issue.md` +**Status**: Empty file - likely resolved or duplicate + +**Action**: Review and delete if no longer relevant. + +--- + +## Summary Statistics + +**Total TODOs Reviewed**: 17 +**Completed**: 7 (41%) +**Remaining**: 10 (59%) + +**By Priority**: + +- High: 3 items (test failures, RBAC tests, auth lifecycle) +- Medium: 5 items (env config, inventory bug, security, proxmox restart, docker) +- Low: 2 items (docker improvements, hiera toggle) + +**Estimated Total Effort**: 15-20 hours + +--- + +## Recommended Action Plan + +**Week 1 - Critical Path**: + +1. Fix test failures (users.test.ts role assignments) - 2 hours +2. Update RBAC test assertions - 1 hour +3. Fix remaining test edge cases - 2 hours +4. Fix auth test database lifecycle - 3 hours + +**Week 2 - Quality & Security**: +5. Fix environment configuration issues - 30 min +6. 
Fix expert mode prototype pollution - 1 hour +7. Investigate inventory multiple source tags - 2 hours +8. Restart Proxmox (if still needed) - 5 min + +**Week 3 - Enhancements**: +9. Docker improvements (package-lock, metadata) - 2 hours +10. Hiera classification mode (if needed) - 3 hours + +--- + +## Prompt for Next Session + +``` +Review and fix the remaining test failures in the Pabawi project: + +1. HIGH PRIORITY - Fix users.test.ts role assignment tests (~17 failures) + - Issue: Tests expect specific role counts but users get auto-assigned viewer role + - Solution: Either set defaultNewUserRole: null in test setup or adjust expectations + - File: test/routes/users.test.ts + +2. Update RBAC test assertions to match structured error format (115 failures) + - Change from: expect(response.body.error).toBe('Unauthorized') + - Change to: expect(response.body.code).toBe('UNAUTHORIZED') + - Files: test/routes/groups.test.ts, test/routes/roles-permissions.test.ts, test/routes/users.test.ts + +3. Fix auth test database lifecycle issues (67 failures) + - Issue: SQLITE_MISUSE errors due to async operations after database closes + - Solution: Use single database per test suite with data cleanup between tests + - File: test/routes/auth.test.ts + +4. Fix remaining edge cases: + - RBAC middleware logging format (2 failures) + - SSH plugin node not found (1 failure) + - Property test __proto__ obfuscation (1 failure) + - Brute force SQL syntax error (1 failure) + - Batch execution logic (2-3 failures) + +Start with #1 as it has the highest impact (17 tests). 
+``` diff --git a/.kiro/todo/expert-mode-prototype-pollution.md b/.kiro/todo/expert-mode-prototype-pollution.md new file mode 100644 index 00000000..031431ea --- /dev/null +++ b/.kiro/todo/expert-mode-prototype-pollution.md @@ -0,0 +1,37 @@ +# Expert Mode Prototype Pollution Vulnerability + +## Issue + +Property-based test failure in `test/properties/expert-mode/property-6.test.ts` reveals a security vulnerability in the expert mode's metadata handling. + +## Details + +- **Test**: Property 6: Debug Info Completeness +- **Failure**: Metadata handling doesn't sanitize dangerous property names +- **Counterexample**: `[" "," ",0,[["__proto__",0],["",{}]]]` +- **Risk**: Prototype pollution vulnerability when adding metadata with keys like `__proto__`, `constructor`, or `prototype` + +## Recommendation + +Implement property name sanitization in `ExpertModeService.addMetadata()` to reject or sanitize dangerous property names: + +```typescript +const DANGEROUS_KEYS = ['__proto__', 'constructor', 'prototype']; + +addMetadata(debugInfo: DebugInfo, key: string, value: unknown): void { + if (DANGEROUS_KEYS.includes(key)) { + // Either reject or sanitize + return; + } + debugInfo.metadata[key] = value; +} +``` + +## Priority + +Medium - Security issue but not actively exploited in current usage + +## Related + +- Expert mode feature +- Not related to Proxmox Frontend UI spec diff --git a/.kiro/todo/proxmox-not-initialized-issue.md b/.kiro/todo/proxmox-not-initialized-issue.md new file mode 100644 index 00000000..e69de29b diff --git a/.kiro/todo/proxmox-restart-required.md b/.kiro/todo/proxmox-restart-required.md new file mode 100644 index 00000000..4a336c49 --- /dev/null +++ b/.kiro/todo/proxmox-restart-required.md @@ -0,0 +1,51 @@ +# Proxmox Integration - Restart Required + +## Issue + +The backend server is showing "Cannot find module 'undici'" error during Proxmox initialization, even though the undici import has been removed from the source code. 
+ +## Root Cause + +The server is running with cached/compiled code that still contains the old undici import. TypeScript compilation or Node.js module caching is serving the old version. + +## Solution + +Restart the backend server to pick up the updated code: + +```bash +# Stop the current backend server (Ctrl+C if running in terminal) +# Then restart it +cd pabawi +npm run dev +``` + +## What Was Fixed + +1. Removed undici import from ProxmoxClient.ts +2. Changed SSL configuration to use `NODE_TLS_REJECT_UNAUTHORIZED` environment variable instead of undici's Agent +3. This approach works with Node.js 18+ native fetch API + +## Verification Steps + +After restarting: + +1. Check backend logs for successful Proxmox initialization +2. Visit the home page - Proxmox should show as "connected" (not "not initialized") +3. Health checks should pass without "fetch failed" errors + +## Current Configuration + +The .env file has correct Proxmox configuration: + +- PROXMOX_ENABLED=true +- PROXMOX_HOST=minis.office.lab42 +- PROXMOX_PORT=8006 +- PROXMOX_TOKEN configured +- PROXMOX_SSL_REJECT_UNAUTHORIZED=false + +## Expected Behavior After Restart + +- Proxmox integration initializes successfully +- Health checks pass +- Integration shows as "connected" on home page +- Provision page shows Proxmox resources diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f5d9fc65..5b61453f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: hooks: - id: no-duplicate-files name: Prevent duplicate files - entry: bash -c 'if git diff --cached --name-only | grep -E "(_fixed|_clean|_backup|_old|_new|_copy|\.bak)"; then echo "Duplicate/backup files detected"; exit 1; fi' + entry: bash -c 'if git diff --cached --name-only | grep -E "(_fixed|_clean|_backup|_old|_new|_copy|\.bak)(\.|$)" ; then echo "Duplicate/backup files detected"; exit 1; fi' language: system pass_filenames: false diff --git a/.secrets.baseline b/.secrets.baseline index 
2c44b051..a256fece 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -133,121 +133,76 @@ } ], "results": { - "backend/.env": [ - { - "type": "Secret Keyword", - "filename": "backend/.env", - "hashed_secret": "631714444b8ae75c2da7e4aa09d209b26ce1242d", - "is_verified": false, - "line_number": 3 - } - ], - "backend/dist/backend/src/integrations/types.js": [ - { - "type": "Secret Keyword", - "filename": "backend/dist/backend/src/integrations/types.js", - "hashed_secret": "cb5329ae33afbd0e43bbb57e2cf2f8cc4be81203", - "is_verified": false, - "line_number": 48 - } - ], - "backend/dist/services/AuditLoggingService.d.ts": [ - { - "type": "Secret Keyword", - "filename": "backend/dist/services/AuditLoggingService.d.ts", - "hashed_secret": "94732424e16be827cd46aa294394ab3ce93b55f4", - "is_verified": false, - "line_number": 22 - } - ], - "backend/dist/services/AuditLoggingService.js": [ - { - "type": "Secret Keyword", - "filename": "backend/dist/services/AuditLoggingService.js", - "hashed_secret": "94732424e16be827cd46aa294394ab3ce93b55f4", - "is_verified": false, - "line_number": 28 - } - ], - "backend/public/assets/main-Cr9JgsA0.js": [ - { - "type": "Secret Keyword", - "filename": "backend/public/assets/main-Cr9JgsA0.js", - "hashed_secret": "e3f51cc694c0b3db46784030e8e6b2e8dff53cdc", - "is_verified": false, - "line_number": 2 - } - ], "backend/test/UserService.password.test.ts": [ { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "8ae2ad11392b6bef22ac6ed6a9bd4b524761bdbd", "is_verified": false, - "line_number": 35 + "line_number": 33 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "a6f9656f4481dd8999a7da13ef1ace9d3ecce7cd", "is_verified": false, - "line_number": 50 + "line_number": 48 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "addbd3aa5619f2932733104eb8ceef08f6fd2693", "is_verified": false, - 
"line_number": 62 + "line_number": 60 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "bb60f36c6c5320f3063113c6c549c0806ae28851", "is_verified": false, - "line_number": 74 + "line_number": 72 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "ef8420d70dd7676e04bea55f405fa39b022a90c8", "is_verified": false, - "line_number": 86 + "line_number": 84 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "b2e98ad6f6eb8508dd6a14cfa704bad7f05f6fb1", "is_verified": false, - "line_number": 98 + "line_number": 96 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", "is_verified": false, - "line_number": 110 + "line_number": 108 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "f9adc2a5795757efc547ecffe3e7b2736041e561", "is_verified": false, - "line_number": 124 + "line_number": 122 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "2635cfb26a9c6a74dde1701ea01dd842d1243178", "is_verified": false, - "line_number": 131 + "line_number": 129 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.password.test.ts", "hashed_secret": "1ded3053d0363079a4e681a3b700435d6d880290", "is_verified": false, - "line_number": 155 + "line_number": 153 } ], "backend/test/UserService.test.ts": [ @@ -256,21 +211,37 @@ "filename": "backend/test/UserService.test.ts", "hashed_secret": "49efef5f70d47adc2db2eb397fbef5f7bc560e29", "is_verified": false, - "line_number": 33 + "line_number": 35 }, { "type": "Secret Keyword", "filename": "backend/test/UserService.test.ts", "hashed_secret": "73027c15d2f0904f8823803d682795f029da84c9", "is_verified": false, - "line_number": 202 + "line_number": 204 }, { "type": "Secret Keyword", 
"filename": "backend/test/UserService.test.ts", "hashed_secret": "e42162b52ade6af27b73ab86415b40d52ae2cc6d", "is_verified": false, - "line_number": 211 + "line_number": 213 + } + ], + "backend/test/config/ConfigService.test.ts": [ + { + "type": "AWS Access Key", + "filename": "backend/test/config/ConfigService.test.ts", + "hashed_secret": "d70eab08607a4d05faa2d0d6647206599e9abc65", + "is_verified": false, + "line_number": 183 + }, + { + "type": "Base64 High Entropy String", + "filename": "backend/test/config/ConfigService.test.ts", + "hashed_secret": "d70eab08607a4d05faa2d0d6647206599e9abc65", + "is_verified": false, + "line_number": 183 } ], "backend/test/integration/auth-flow.test.ts": [ @@ -279,7 +250,7 @@ "filename": "backend/test/integration/auth-flow.test.ts", "hashed_secret": "72559b51f94a7a3ad058c5740cbe2f7cb0d4080b", "is_verified": false, - "line_number": 60 + "line_number": 97 } ], "backend/test/integration/permission-inheritance.test.ts": [ @@ -288,7 +259,64 @@ "filename": "backend/test/integration/permission-inheritance.test.ts", "hashed_secret": "72559b51f94a7a3ad058c5740cbe2f7cb0d4080b", "is_verified": false, - "line_number": 57 + "line_number": 59 + } + ], + "backend/test/integrations/SSHService.test.ts": [ + { + "type": "Private Key", + "filename": "backend/test/integrations/SSHService.test.ts", + "hashed_secret": "be4fc4886bd949b369d5e092eb87494f12e57e5b", + "is_verified": false, + "line_number": 218 + } + ], + "backend/test/integrations/aws/AWSPlugin.executeAction.test.ts": [ + { + "type": "Secret Keyword", + "filename": "backend/test/integrations/aws/AWSPlugin.executeAction.test.ts", + "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4", + "is_verified": false, + "line_number": 47 + } + ], + "backend/test/integrations/ssh-config.test.ts": [ + { + "type": "Secret Keyword", + "filename": "backend/test/integrations/ssh-config.test.ts", + "hashed_secret": "7cb6efb98ba5972a9b5090dc2e517fe14d12cb04", + "is_verified": false, + "line_number": 40 
+ }, + { + "type": "Secret Keyword", + "filename": "backend/test/integrations/ssh-config.test.ts", + "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4", + "is_verified": false, + "line_number": 41 + } + ], + "backend/test/integrations/ssh-config.unit.test.ts": [ + { + "type": "Secret Keyword", + "filename": "backend/test/integrations/ssh-config.unit.test.ts", + "hashed_secret": "7cb6efb98ba5972a9b5090dc2e517fe14d12cb04", + "is_verified": false, + "line_number": 364 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/integrations/ssh-config.unit.test.ts", + "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4", + "is_verified": false, + "line_number": 377 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/integrations/ssh-config.unit.test.ts", + "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", + "is_verified": false, + "line_number": 388 } ], "backend/test/performance/rbac-performance.test.ts": [ @@ -297,14 +325,14 @@ "filename": "backend/test/performance/rbac-performance.test.ts", "hashed_secret": "ab3eb0f868f05373c611a6c904ae319ff0772c0c", "is_verified": false, - "line_number": 96 + "line_number": 98 }, { "type": "Secret Keyword", "filename": "backend/test/performance/rbac-performance.test.ts", "hashed_secret": "f1624a6e773796ffd05aea214ec4b89a4fe66905", "is_verified": false, - "line_number": 295 + "line_number": 299 } ], "backend/test/properties/user/email-uniqueness.property.test.ts": [ @@ -313,14 +341,14 @@ "filename": "backend/test/properties/user/email-uniqueness.property.test.ts", "hashed_secret": "29c48a6839cce1bdf6566603b617f01b11a5c585", "is_verified": false, - "line_number": 127 + "line_number": 123 }, { "type": "Secret Keyword", "filename": "backend/test/properties/user/email-uniqueness.property.test.ts", "hashed_secret": "5c4f2778eafa38751fbd21fd4c1ef13ed9cfcff6", "is_verified": false, - "line_number": 198 + "line_number": 194 } ], "backend/test/properties/user/username-uniqueness.property.test.ts": 
[ @@ -329,14 +357,14 @@ "filename": "backend/test/properties/user/username-uniqueness.property.test.ts", "hashed_secret": "29c48a6839cce1bdf6566603b617f01b11a5c585", "is_verified": false, - "line_number": 127 + "line_number": 123 }, { "type": "Secret Keyword", "filename": "backend/test/properties/user/username-uniqueness.property.test.ts", "hashed_secret": "5c4f2778eafa38751fbd21fd4c1ef13ed9cfcff6", "is_verified": false, - "line_number": 198 + "line_number": 194 } ], "backend/test/routes/auth.test.ts": [ @@ -345,119 +373,249 @@ "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "49efef5f70d47adc2db2eb397fbef5f7bc560e29", "is_verified": false, - "line_number": 42 + "line_number": 45 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "cc795c98085b982ebfb57744e2b956f0f9c91e35", "is_verified": false, - "line_number": 122 + "line_number": 125 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "e6bf3d54c30a7c713c4676a5e3cce1e3c08fad9a", "is_verified": false, - "line_number": 345 + "line_number": 348 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "addbd3aa5619f2932733104eb8ceef08f6fd2693", "is_verified": false, - "line_number": 370 + "line_number": 373 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "bb60f36c6c5320f3063113c6c549c0806ae28851", "is_verified": false, - "line_number": 395 + "line_number": 398 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "ef8420d70dd7676e04bea55f405fa39b022a90c8", "is_verified": false, - "line_number": 420 + "line_number": 423 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "b2e98ad6f6eb8508dd6a14cfa704bad7f05f6fb1", "is_verified": false, - "line_number": 445 + "line_number": 448 }, { "type": "Secret Keyword", "filename": 
"backend/test/routes/auth.test.ts", "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", "is_verified": false, - "line_number": 470 + "line_number": 473 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "98e540ffae8223c143f1453da4c8604ca943213e", "is_verified": false, - "line_number": 988 + "line_number": 984 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "6cca58fd7bcdac7ff6e551a93af8653819e5debf", "is_verified": false, - "line_number": 1064 + "line_number": 1054 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "73027c15d2f0904f8823803d682795f029da84c9", "is_verified": false, - "line_number": 2532 + "line_number": 2507 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "10a6d0b1420bda05bb65eb2eb4826acad64b23c3", "is_verified": false, - "line_number": 2553 + "line_number": 2528 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "dc8002865f92070749b264e76045b04fa3b8de71", "is_verified": false, - "line_number": 2939 + "line_number": 2914 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "328589bb85061abe136534a240365574ef3e55b5", "is_verified": false, - "line_number": 2985 + "line_number": 2960 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "a2cf898eb671a87b9fc1da29f26d06d05b103eae", "is_verified": false, - "line_number": 3031 + "line_number": 3006 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "a55a3986a699ba4d2b29b2057811322bf076abd0", "is_verified": false, - "line_number": 3077 + "line_number": 3052 }, { "type": "Secret Keyword", "filename": "backend/test/routes/auth.test.ts", "hashed_secret": "20f51273bab4ccffb79e8302fc4d876b3f9c1b72", "is_verified": false, - "line_number": 3168 + 
"line_number": 3143 + } + ], + "backend/test/routes/auth.test.ts.backup": [ + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "49efef5f70d47adc2db2eb397fbef5f7bc560e29", + "is_verified": false, + "line_number": 43 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "cc795c98085b982ebfb57744e2b956f0f9c91e35", + "is_verified": false, + "line_number": 123 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "e6bf3d54c30a7c713c4676a5e3cce1e3c08fad9a", + "is_verified": false, + "line_number": 346 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "addbd3aa5619f2932733104eb8ceef08f6fd2693", + "is_verified": false, + "line_number": 371 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "bb60f36c6c5320f3063113c6c549c0806ae28851", + "is_verified": false, + "line_number": 396 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "ef8420d70dd7676e04bea55f405fa39b022a90c8", + "is_verified": false, + "line_number": 421 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "b2e98ad6f6eb8508dd6a14cfa704bad7f05f6fb1", + "is_verified": false, + "line_number": 446 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", + "is_verified": false, + "line_number": 471 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "98e540ffae8223c143f1453da4c8604ca943213e", + "is_verified": false, + "line_number": 980 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + 
"hashed_secret": "6cca58fd7bcdac7ff6e551a93af8653819e5debf", + "is_verified": false, + "line_number": 1056 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "73027c15d2f0904f8823803d682795f029da84c9", + "is_verified": false, + "line_number": 2524 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "10a6d0b1420bda05bb65eb2eb4826acad64b23c3", + "is_verified": false, + "line_number": 2545 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "dc8002865f92070749b264e76045b04fa3b8de71", + "is_verified": false, + "line_number": 2931 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "328589bb85061abe136534a240365574ef3e55b5", + "is_verified": false, + "line_number": 2977 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "a2cf898eb671a87b9fc1da29f26d06d05b103eae", + "is_verified": false, + "line_number": 3023 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "a55a3986a699ba4d2b29b2057811322bf076abd0", + "is_verified": false, + "line_number": 3069 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/auth.test.ts.backup", + "hashed_secret": "20f51273bab4ccffb79e8302fc4d876b3f9c1b72", + "is_verified": false, + "line_number": 3160 + } + ], + "backend/test/routes/aws.test.ts": [ + { + "type": "Secret Keyword", + "filename": "backend/test/routes/aws.test.ts", + "hashed_secret": "bc7d0af48032303599f08bd3942c3cad9768348f", + "is_verified": false, + "line_number": 73 } ], "backend/test/routes/groups.test.ts": [ @@ -480,7 +638,23 @@ "filename": "backend/test/routes/groups.test.ts", "hashed_secret": "ab3eb0f868f05373c611a6c904ae319ff0772c0c", "is_verified": false, - "line_number": 916 + 
"line_number": 871 + } + ], + "backend/test/routes/journal.test.ts": [ + { + "type": "Secret Keyword", + "filename": "backend/test/routes/journal.test.ts", + "hashed_secret": "bc7d0af48032303599f08bd3942c3cad9768348f", + "is_verified": false, + "line_number": 49 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/routes/journal.test.ts", + "hashed_secret": "7d82eb19413a1c9d1cfa268a12f365826e054432", + "is_verified": false, + "line_number": 104 } ], "backend/test/routes/permissions.test.ts": [ @@ -521,91 +695,177 @@ "filename": "backend/test/routes/users.test.ts", "hashed_secret": "bc7d0af48032303599f08bd3942c3cad9768348f", "is_verified": false, - "line_number": 53 + "line_number": 69 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "7d82eb19413a1c9d1cfa268a12f365826e054432", "is_verified": false, - "line_number": 95 + "line_number": 111 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "ab3eb0f868f05373c611a6c904ae319ff0772c0c", "is_verified": false, - "line_number": 177 + "line_number": 184 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "6f8e197b1c71a8b00fad66dc9df22a8464a4dcc1", "is_verified": false, - "line_number": 338 + "line_number": 345 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "e42162b52ade6af27b73ab86415b40d52ae2cc6d", "is_verified": false, - "line_number": 953 + "line_number": 930 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "a6f9656f4481dd8999a7da13ef1ace9d3ecce7cd", "is_verified": false, - "line_number": 1074 + "line_number": 1051 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "6417fb8955978dc506f194a08fa88b8cd70c12e2", "is_verified": false, - "line_number": 1084 + "line_number": 1061 }, { "type": "Secret Keyword", "filename": 
"backend/test/routes/users.test.ts", "hashed_secret": "b637d65de3a6d3f2675a67f5a655365d7de98ba5", "is_verified": false, - "line_number": 1095 + "line_number": 1072 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "4fc6cdec4087f914064f2715a98f0edc07b81bef", "is_verified": false, - "line_number": 1106 + "line_number": 1083 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "56be91c804c2116ab8e5e9722760669b187195bf", "is_verified": false, - "line_number": 1117 + "line_number": 1094 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "49aaf3412b65a2765a83f15f86f034261be3c660", "is_verified": false, - "line_number": 1161 + "line_number": 1138 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "052da94a6343dacf76068ae52e8f60afef31507e", "is_verified": false, - "line_number": 1251 + "line_number": 1228 }, { "type": "Secret Keyword", "filename": "backend/test/routes/users.test.ts", "hashed_secret": "517d9353abf9de848163de0b6e4c65ad797ffb9f", "is_verified": false, - "line_number": 1366 + "line_number": 1346 + } + ], + "backend/test/services/IntegrationConfigService.test.ts": [ + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "01f2f9650fa59b222aa4a864d6ddd993cd21c94c", + "is_verified": false, + "line_number": 5 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "e8964aa5aa9fea46cbfdbd4bd4c3898a9fcc5e25", + "is_verified": false, + "line_number": 52 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "02c173fc064db3d5d36ffbea6e3d09a6ca93ae45", + "is_verified": false, + "line_number": 98 + }, + { + "type": "Secret Keyword", + "filename": 
"backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", + "is_verified": false, + "line_number": 151 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "120340c3d011e9621b9bb88b5884221b25251b88", + "is_verified": false, + "line_number": 199 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "86d28c461cbac1b8bf7a294d2c7f2625b2b421c6", + "is_verified": false, + "line_number": 201 + }, + { + "type": "Base64 High Entropy String", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "d70eab08607a4d05faa2d0d6647206599e9abc65", + "is_verified": false, + "line_number": 233 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "d70eab08607a4d05faa2d0d6647206599e9abc65", + "is_verified": false, + "line_number": 233 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "7a9b93cfa651fbc2c93d88edea4d4fcfe33c0a0b", + "is_verified": false, + "line_number": 270 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "aaa30803e5edebe0ae1d3950b7e6d388fe62e8db", + "is_verified": false, + "line_number": 412 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "d32610fec542c256156dd8673e2ce94f487870a1", + "is_verified": false, + "line_number": 413 + }, + { + "type": "Secret Keyword", + "filename": "backend/test/services/IntegrationConfigService.test.ts", + "hashed_secret": "4ea8d2335b430796cf3f500368c5b0f5b1dc90f5", + "is_verified": false, + "line_number": 422 } ], "backend/test/unit/error-handling.test.ts": [ @@ -614,28 +874,28 @@ "filename": 
"backend/test/unit/error-handling.test.ts", "hashed_secret": "98e540ffae8223c143f1453da4c8604ca943213e", "is_verified": false, - "line_number": 69 + "line_number": 70 }, { "type": "Secret Keyword", "filename": "backend/test/unit/error-handling.test.ts", "hashed_secret": "49efef5f70d47adc2db2eb397fbef5f7bc560e29", "is_verified": false, - "line_number": 86 + "line_number": 87 }, { "type": "Secret Keyword", "filename": "backend/test/unit/error-handling.test.ts", "hashed_secret": "1ded3053d0363079a4e681a3b700435d6d880290", "is_verified": false, - "line_number": 346 + "line_number": 352 }, { "type": "Secret Keyword", "filename": "backend/test/unit/error-handling.test.ts", "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", "is_verified": false, - "line_number": 386 + "line_number": 392 } ], "backend/test/validation/rbacSchemas.test.ts": [ @@ -682,41 +942,54 @@ "line_number": 515 } ], - "frontend/dist/assets/index-Dq8PDVe-.js": [ + "docs/integrations/aws.md": [ { - "type": "Secret Keyword", - "filename": "frontend/dist/assets/index-Dq8PDVe-.js", - "hashed_secret": "3dd580e7dc68b01dd9dec0ba7d5e413e6bad5404", + "type": "AWS Access Key", + "filename": "docs/integrations/aws.md", + "hashed_secret": "25910f981e85ca04baf359199dd0bd4a3ae738b6", "is_verified": false, - "line_number": 2 + "line_number": 29 }, { - "type": "Secret Keyword", - "filename": "frontend/dist/assets/index-Dq8PDVe-.js", - "hashed_secret": "5166f0515b160c3f1beaf394b778161bc03319ef", + "type": "Base64 High Entropy String", + "filename": "docs/integrations/aws.md", + "hashed_secret": "d70eab08607a4d05faa2d0d6647206599e9abc65", "is_verified": false, - "line_number": 2 + "line_number": 30 }, { "type": "Secret Keyword", - "filename": "frontend/dist/assets/index-Dq8PDVe-.js", - "hashed_secret": "ded27e35bca3b77f196d7fea07126942addd7362", + "filename": "docs/integrations/aws.md", + "hashed_secret": "d70eab08607a4d05faa2d0d6647206599e9abc65", "is_verified": false, - "line_number": 2 - }, + "line_number": 
30 + } + ], + "docs/integrations/proxmox.md": [ { "type": "Secret Keyword", - "filename": "frontend/dist/assets/index-Dq8PDVe-.js", - "hashed_secret": "3e7d56a95804ff3c322b97a7e7cdba25dc920957", + "filename": "docs/integrations/proxmox.md", + "hashed_secret": "a6778f1880744bd1a342a8e3789135412d8f9da2", "is_verified": false, - "line_number": 388 - }, + "line_number": 152 + } + ], + "docs/integrations/ssh.md": [ { "type": "Secret Keyword", - "filename": "frontend/dist/assets/index-Dq8PDVe-.js", - "hashed_secret": "6c56a9249cba324d029f725f1f7c0e47184e2dcf", + "filename": "docs/integrations/ssh.md", + "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", "is_verified": false, - "line_number": 388 + "line_number": 131 + } + ], + "frontend/src/components/AWSSetupGuide.svelte": [ + { + "type": "AWS Access Key", + "filename": "frontend/src/components/AWSSetupGuide.svelte", + "hashed_secret": "25910f981e85ca04baf359199dd0bd4a3ae738b6", + "is_verified": false, + "line_number": 162 } ], "frontend/src/components/ChangePasswordDialog.svelte": [ @@ -769,7 +1042,16 @@ "filename": "frontend/src/lib/api-auth-integration.test.ts", "hashed_secret": "cbfdac6008f9cab4083784cbd1874f76618d2a97", "is_verified": false, - "line_number": 69 + "line_number": 70 + } + ], + "frontend/src/lib/api-provisioning.test.ts": [ + { + "type": "Secret Keyword", + "filename": "frontend/src/lib/api-provisioning.test.ts", + "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4", + "is_verified": false, + "line_number": 302 } ], "frontend/src/lib/auth.test.ts": [ @@ -778,14 +1060,14 @@ "filename": "frontend/src/lib/auth.test.ts", "hashed_secret": "cbfdac6008f9cab4083784cbd1874f76618d2a97", "is_verified": false, - "line_number": 79 + "line_number": 80 }, { "type": "Secret Keyword", "filename": "frontend/src/lib/auth.test.ts", "hashed_secret": "d8ecf7db8fc9ec9c31bc5c9ae2929cc599c75f8d", "is_verified": false, - "line_number": 114 + "line_number": 115 } ], "frontend/src/pages/LoginPage.svelte": [ @@ 
-828,5 +1110,5 @@ } ] }, - "generated_at": "2026-03-04T07:57:54Z" + "generated_at": "2026-03-16T11:18:29Z" } diff --git a/CLAUDE.md b/CLAUDE.md index 7d3718ad..bf7063fd 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -56,7 +56,7 @@ All infrastructure integrations (Bolt, PuppetDB, Puppetserver, Hiera, Ansible, S - **`services/`** — Cross-cutting services: `ExecutionQueue` (concurrent limiting, FIFO), `StreamingExecutionManager` (SSE real-time output), `CommandWhitelistService` (security), `DatabaseService`, `AuthenticationService`, `BatchExecutionService`, and RBAC services (`UserService`, `RoleService`, `PermissionService`, `GroupService`) - **`routes/`** — Express route handlers. All async handlers must be wrapped with `asyncHandler()` from `utils/` - **`middleware/`** — Auth (JWT), RBAC, error handler, rate limiting, security headers -- **`database/`** — `DatabaseService.ts` (SQLite, schema/migration on startup), `ExecutionRepository.ts` (CRUD for execution history). Schema in `database/schema.sql`, migrations in `database/migrations.sql` +- **`database/`** — `DatabaseService.ts` (SQLite, migration-first approach), `ExecutionRepository.ts` (CRUD for execution history). All schema in `migrations/*.sql` (000: initial, 001: RBAC, etc.) 
- **`errors/`** — Typed error classes extending base classes; use these instead of generic `Error` - **`validation/`** — Zod schemas for request body validation diff --git a/Dockerfile b/Dockerfile index b4b9e4ce..9eae7e4b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -51,7 +51,7 @@ ARG BUILDPLATFORM # Add metadata labels LABEL org.opencontainers.image.title="Pabawi" LABEL org.opencontainers.image.description="Puppet Ansible Bolt Awesome Web Interface" -LABEL org.opencontainers.image.version="0.8.0" +LABEL org.opencontainers.image.version="0.10.0" LABEL org.opencontainers.image.vendor="example42" LABEL org.opencontainers.image.source="https://github.com/example42/pabawi" @@ -87,7 +87,7 @@ RUN apt-get update && \ ruby-dev \ build-essential \ ansible \ - && gem install openbolt -v 5.1.0 --no-document \ + openbolt \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* @@ -104,8 +104,9 @@ COPY --from=backend-builder --chown=pabawi:pabawi /app/backend/dist ./dist COPY --from=backend-deps --chown=pabawi:pabawi /app/backend/node_modules ./node_modules COPY --from=backend-builder --chown=pabawi:pabawi /app/backend/package*.json ./ -# Copy SQL schema file (not copied by TypeScript compiler) -COPY --from=backend-builder --chown=pabawi:pabawi /app/backend/src/database/schema.sql ./dist/database/ +# Copy database directory with all SQL files and migrations (not copied by TypeScript compiler) +# This ensures schema files, migrations, and any future database-related files are included +COPY --from=backend-builder --chown=pabawi:pabawi /app/backend/src/database/ ./dist/database/ # Copy built frontend to public directory COPY --from=frontend-builder --chown=pabawi:pabawi /app/frontend/dist ./public @@ -139,7 +140,9 @@ ENV NODE_ENV=production \ PUPPETDB_ENABLED=false \ PUPPETSERVER_ENABLED=false \ HIERA_ENABLED=false \ - ANSIBLE_ENABLED=false + ANSIBLE_ENABLED=false \ + PROXMOX_ENABLED=false \ + AWS_ENABLED=false # Health check HEALTHCHECK --interval=30s --timeout=3s 
--start-period=5s --retries=3 \ diff --git a/Dockerfile.alpine b/Dockerfile.alpine index 23a638ac..085f6dcd 100644 --- a/Dockerfile.alpine +++ b/Dockerfile.alpine @@ -44,7 +44,7 @@ ARG BUILDPLATFORM # Add metadata labels LABEL org.opencontainers.image.title="Pabawi" LABEL org.opencontainers.image.description="Web interface for Bolt automation tool" -LABEL org.opencontainers.image.version="0.4.0" +LABEL org.opencontainers.image.version="0.10.0" LABEL org.opencontainers.image.vendor="example42" LABEL org.opencontainers.image.source="https://github.com/example42/pabawi" diff --git a/Dockerfile.ubuntu b/Dockerfile.ubuntu index fc64f3da..22e368de 100644 --- a/Dockerfile.ubuntu +++ b/Dockerfile.ubuntu @@ -44,7 +44,7 @@ ARG BUILDPLATFORM # Add metadata labels LABEL org.opencontainers.image.title="Pabawi" LABEL org.opencontainers.image.description="Web interface for Bolt automation tool" -LABEL org.opencontainers.image.version="0.4.0" +LABEL org.opencontainers.image.version="0.10.0" LABEL org.opencontainers.image.vendor="example42" LABEL org.opencontainers.image.source="https://github.com/example42/pabawi" diff --git a/backend/.env.example b/backend/.env.example index 98fcef0f..c8da970f 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -17,6 +17,11 @@ COMMAND_WHITELIST_ALLOW_ALL=false COMMAND_WHITELIST=["ps -adef","pwd","whoami","uptime","free"] COMMAND_WHITELIST_MATCH_MODE=exact +# Provisioning safety +# Set to true to allow all destructive provisioning actions +# (e.g., Proxmox VM/LXC destroy, AWS EC2 terminate) +ALLOW_DESTRUCTIVE_PROVISIONING=false + # Bolt integration (optional) BOLT_PROJECT_PATH=./samples/integrations/bolt BOLT_EXECUTION_TIMEOUT=300000 @@ -71,3 +76,23 @@ SSH_SUDO_PASSWORDLESS=true SSH_SUDO_PASSWORD= SSH_SUDO_USER=root SSH_PRIORITY=50 + +# Proxmox integration (optional) +PROXMOX_ENABLED=false +PROXMOX_HOST=proxmox.example.com +PROXMOX_PORT=8006 +# Token authentication (recommended over password) +# 
PROXMOX_TOKEN=user@realm!tokenid=token-value +PROXMOX_SSL_REJECT_UNAUTHORIZED=true + +# AWS integration (optional) +AWS_ENABLED=false +# Use AWS profiles or IAM roles when possible instead of static keys. +# If omitted, the AWS SDK default credential chain is used +# (env vars, ~/.aws/credentials, instance profile, etc.) +# AWS_ACCESS_KEY_ID= +# AWS_SECRET_ACCESS_KEY= +# AWS_DEFAULT_REGION=us-east-1 +# Query multiple regions (JSON array or comma-separated) +# AWS_REGIONS=["us-east-1","eu-west-1"] +# AWS_PROFILE=default diff --git a/backend/package.json b/backend/package.json index 6016e1d5..ab725712 100644 --- a/backend/package.json +++ b/backend/package.json @@ -1,11 +1,11 @@ { "name": "backend", - "version": "0.8.0", + "version": "0.10.0", "description": "Backend API server for Pabawi", "main": "dist/server.js", "scripts": { "dev": "tsx watch src/server.ts", - "build": "tsc && mkdir -p dist/database && cp src/database/schema.sql dist/database/ && cp src/database/migrations.sql dist/database/", + "build": "tsc && mkdir -p dist/database && cp -r src/database/migrations dist/database/", "start": "node dist/server.js", "test": "vitest --run --passWithNoTests", "test:watch": "vitest", @@ -13,6 +13,8 @@ "lint:fix": "eslint src --ext .ts --fix" }, "dependencies": { + "@aws-sdk/client-ec2": "^3.700.0", + "@aws-sdk/client-sts": "^3.700.0", "@types/ssh2": "^1.15.5", "bcrypt": "^5.1.1", "cors": "^2.8.5", @@ -21,6 +23,7 @@ "express-rate-limit": "^8.2.1", "helmet": "^8.1.0", "jsonwebtoken": "^9.0.2", + "pg": "^8.13.0", "sqlite3": "^5.1.7", "ssh2": "^1.17.0", "yaml": "^2.8.2", @@ -32,6 +35,7 @@ "@types/express": "^4.17.21", "@types/jsonwebtoken": "^9.0.7", "@types/node": "^20.19.27", + "@types/pg": "^8.11.0", "@types/supertest": "^6.0.2", "fast-check": "^4.3.0", "supertest": "^7.0.0", diff --git a/backend/src/config/ConfigService.ts b/backend/src/config/ConfigService.ts index ca0cb9b1..b3c93324 100644 --- a/backend/src/config/ConfigService.ts +++ 
b/backend/src/config/ConfigService.ts @@ -109,6 +109,33 @@ export class ConfigService { exclusionPatterns?: string[]; }; }; + proxmox?: { + enabled: boolean; + host: string; + port?: number; + username?: string; + password?: string; + realm?: string; + token?: string; + ssl?: { + rejectUnauthorized?: boolean; + ca?: string; + cert?: string; + key?: string; + }; + timeout?: number; + priority?: number; + }; + aws?: { + enabled: boolean; + accessKeyId?: string; + secretAccessKey?: string; + region?: string; + regions?: string[]; + sessionToken?: string; + profile?: string; + endpoint?: string; + }; } { const integrations: ReturnType = {}; @@ -396,6 +423,80 @@ export class ConfigService { } } + // Parse Proxmox configuration + if (process.env.PROXMOX_ENABLED === "true") { + const host = process.env.PROXMOX_HOST; + if (!host) { + throw new Error( + "PROXMOX_HOST is required when PROXMOX_ENABLED is true", + ); + } + + integrations.proxmox = { + enabled: true, + host, + port: process.env.PROXMOX_PORT + ? parseInt(process.env.PROXMOX_PORT, 10) + : undefined, + username: process.env.PROXMOX_USERNAME, + password: process.env.PROXMOX_PASSWORD, + realm: process.env.PROXMOX_REALM, + token: process.env.PROXMOX_TOKEN, + timeout: process.env.PROXMOX_TIMEOUT + ? parseInt(process.env.PROXMOX_TIMEOUT, 10) + : undefined, + priority: process.env.PROXMOX_PRIORITY + ? 
parseInt(process.env.PROXMOX_PRIORITY, 10) + : undefined, + }; + + // Parse SSL configuration if any SSL-related env vars are set + if ( + process.env.PROXMOX_SSL_REJECT_UNAUTHORIZED !== undefined || + process.env.PROXMOX_SSL_CA || + process.env.PROXMOX_SSL_CERT || + process.env.PROXMOX_SSL_KEY + ) { + integrations.proxmox.ssl = { + rejectUnauthorized: + process.env.PROXMOX_SSL_REJECT_UNAUTHORIZED !== "false", + ca: process.env.PROXMOX_SSL_CA, + cert: process.env.PROXMOX_SSL_CERT, + key: process.env.PROXMOX_SSL_KEY, + }; + } + } + + // Parse AWS configuration + if (process.env.AWS_ENABLED === "true") { + // Parse regions from JSON array or comma-separated string + let regions: string[] | undefined; + if (process.env.AWS_REGIONS) { + try { + const parsed = JSON.parse(process.env.AWS_REGIONS) as unknown; + if (Array.isArray(parsed)) { + regions = parsed.filter( + (item): item is string => typeof item === "string", + ); + } + } catch { + // Not JSON — treat as comma-separated + regions = process.env.AWS_REGIONS.split(",").map((r) => r.trim()).filter(Boolean); + } + } + + integrations.aws = { + enabled: true, + accessKeyId: process.env.AWS_ACCESS_KEY_ID, + secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY, + region: process.env.AWS_DEFAULT_REGION || undefined, + regions, + sessionToken: process.env.AWS_SESSION_TOKEN, + profile: process.env.AWS_PROFILE, + endpoint: process.env.AWS_ENDPOINT, + }; + } + return integrations; } @@ -483,6 +584,12 @@ export class ConfigService { process.env.UI_SHOW_HOME_PAGE_RUN_CHART !== "false", }; + // Parse provisioning safety configuration + const provisioning = { + allowDestructiveActions: + process.env.ALLOW_DESTRUCTIVE_PROVISIONING === "true", + }; + // Build configuration object const rawConfig = { port: process.env.PORT ? 
parseInt(process.env.PORT, 10) : undefined, @@ -502,6 +609,7 @@ export class ConfigService { cache, executionQueue, integrations, + provisioning, ui, }; @@ -616,6 +724,13 @@ export class ConfigService { return this.config.integrations; } + /** + * Check whether destructive provisioning actions (destroy/terminate) are allowed + */ + public isDestructiveProvisioningAllowed(): boolean { + return this.config.provisioning.allowDestructiveActions; + } + /** * Get PuppetDB configuration if enabled */ @@ -674,4 +789,17 @@ export class ConfigService { public getUIConfig(): typeof this.config.ui { return this.config.ui; } + + /** + * Get AWS configuration if enabled + */ + public getAWSConfig(): + | (typeof this.config.integrations.aws & { enabled: true }) + | null { + const aws = this.config.integrations.aws; + if (aws?.enabled) { + return aws as typeof aws & { enabled: true }; + } + return null; + } } diff --git a/backend/src/config/schema.ts b/backend/src/config/schema.ts index 87b3c1f0..30f1ac8a 100644 --- a/backend/src/config/schema.ts +++ b/backend/src/config/schema.ts @@ -269,6 +269,65 @@ export const HieraConfigSchema = z.object({ export type HieraConfig = z.infer; +/** + * Proxmox SSL configuration schema + */ +export const ProxmoxSSLConfigSchema = z.object({ + rejectUnauthorized: z.boolean().default(true), + ca: z.string().optional(), + cert: z.string().optional(), + key: z.string().optional(), +}); + +export type ProxmoxSSLConfig = z.infer; + +/** + * Proxmox integration configuration schema + */ +export const ProxmoxConfigSchema = z.object({ + enabled: z.boolean().default(false), + host: z.string(), + port: z.number().int().positive().max(65535).default(8006), + username: z.string().optional(), + password: z.string().optional(), + realm: z.string().optional(), + token: z.string().optional(), + ssl: ProxmoxSSLConfigSchema.optional(), + timeout: z.number().int().positive().default(30000), // 30 seconds + priority: z.number().int().nonnegative().default(7), +}); + 
+export type ProxmoxConfig = z.infer; + +/** + * AWS integration configuration schema + */ +export const AWSConfigSchema = z.object({ + enabled: z.boolean().default(false), + accessKeyId: z.string().optional(), + secretAccessKey: z.string().optional(), + region: z.string().default("us-east-1"), + regions: z.array(z.string()).optional(), + sessionToken: z.string().optional(), + profile: z.string().optional(), + endpoint: z.string().optional(), +}); + +export type AWSIntegrationConfig = z.infer; + +/** + * Provisioning safety configuration schema + * + * Controls whether destructive provisioning actions (e.g., destroy VM/LXC, + * terminate EC2 instance) are allowed. When disabled, all provisioning + * integrations will reject destroy/terminate requests. + */ +export const ProvisioningConfigSchema = z.object({ + allowDestructiveActions: z.boolean().default(false), +}); + +export type ProvisioningConfig = z.infer; + /** * Integrations configuration schema */ @@ -277,6 +336,8 @@ export const IntegrationsConfigSchema = z.object({ puppetdb: PuppetDBConfigSchema.optional(), puppetserver: PuppetserverConfigSchema.optional(), hiera: HieraConfigSchema.optional(), + proxmox: ProxmoxConfigSchema.optional(), + aws: AWSConfigSchema.optional(), }); export type IntegrationsConfig = z.infer; @@ -308,6 +369,7 @@ export const AppConfigSchema = z.object({ cache: CacheConfigSchema, executionQueue: ExecutionQueueConfigSchema, integrations: IntegrationsConfigSchema.default({}), + provisioning: ProvisioningConfigSchema.default({ allowDestructiveActions: false }), ui: UIConfigSchema.default({ showHomePageRunChart: true }), }); diff --git a/backend/src/database/AdapterFactory.ts b/backend/src/database/AdapterFactory.ts new file mode 100644 index 00000000..889acf0b --- /dev/null +++ b/backend/src/database/AdapterFactory.ts @@ -0,0 +1,29 @@ +import type { DatabaseAdapter } from "./DatabaseAdapter"; + +export interface AdapterFactoryConfig { + databasePath: string; +} + +/** + * Create the 
appropriate DatabaseAdapter based on environment configuration. + * + * - DB_TYPE="sqlite" or unset → SQLiteAdapter + * - DB_TYPE="postgres" → PostgresAdapter (requires DATABASE_URL) + */ +export async function createDatabaseAdapter(config: AdapterFactoryConfig): Promise { + const dbType = process.env.DB_TYPE ?? "sqlite"; + + if (dbType === "postgres") { + const databaseUrl = process.env.DATABASE_URL; + if (!databaseUrl) { + throw new Error( + "DATABASE_URL environment variable is required when DB_TYPE is 'postgres'" + ); + } + const { PostgresAdapter } = await import("./PostgresAdapter"); + return new PostgresAdapter(databaseUrl); + } + + const { SQLiteAdapter } = await import("./SQLiteAdapter"); + return new SQLiteAdapter(config.databasePath); +} diff --git a/backend/src/database/DatabaseAdapter.ts b/backend/src/database/DatabaseAdapter.ts new file mode 100644 index 00000000..78ed2af2 --- /dev/null +++ b/backend/src/database/DatabaseAdapter.ts @@ -0,0 +1,42 @@ +/** + * Unified database interface abstracting SQLite and PostgreSQL. + * All services operate against this interface, enabling either backend + * without code changes. + */ +export interface DatabaseAdapter { + /** Execute a query and return all matching rows. */ + query(sql: string, params?: unknown[]): Promise; + + /** Execute a query and return the first row, or null. */ + queryOne(sql: string, params?: unknown[]): Promise; + + /** Execute a statement (INSERT/UPDATE/DELETE) and return the number of affected rows. */ + execute(sql: string, params?: unknown[]): Promise<{ changes: number }>; + + /** Begin a transaction. */ + beginTransaction(): Promise; + + /** Commit the current transaction. */ + commit(): Promise; + + /** Rollback the current transaction. */ + rollback(): Promise; + + /** Run a callback inside a transaction; commits on success, rolls back on error. */ + withTransaction(fn: () => Promise): Promise; + + /** Open the database connection and perform any setup (e.g. WAL mode). 
*/ + initialize(): Promise; + + /** Close the database connection and release resources. */ + close(): Promise; + + /** Returns true if the database connection is open. */ + isConnected(): boolean; + + /** Returns the SQL dialect of this adapter. */ + getDialect(): "sqlite" | "postgres"; + + /** Returns the parameter placeholder for the given 1-based index. */ + getPlaceholder(index: number): string; +} diff --git a/backend/src/database/DatabaseService.ts b/backend/src/database/DatabaseService.ts index 1f424f7e..aed8ab3c 100644 --- a/backend/src/database/DatabaseService.ts +++ b/backend/src/database/DatabaseService.ts @@ -1,14 +1,14 @@ -import sqlite3 from "sqlite3"; -import { readFileSync } from "fs"; -import { dirname, join } from "path"; -import { mkdirSync, existsSync } from "fs"; +import { createDatabaseAdapter } from "./AdapterFactory"; +import type { DatabaseAdapter } from "./DatabaseAdapter"; import { MigrationRunner } from "./MigrationRunner"; +import { dirname } from "path"; +import { mkdirSync, existsSync } from "fs"; /** - * Database service for SQLite initialization and connection management + * Database service for initialization and connection management */ export class DatabaseService { - private db: sqlite3.Database | null = null; + private adapter: DatabaseAdapter | null = null; private databasePath: string; constructor(databasePath: string) { @@ -20,123 +20,21 @@ export class DatabaseService { */ public async initialize(): Promise { try { - // Ensure database directory exists + // Ensure database directory exists (for SQLite) const dbDir = dirname(this.databasePath); if (!existsSync(dbDir)) { mkdirSync(dbDir, { recursive: true }); } - // Create database connection - this.db = await this.createConnection(); + // Create adapter via factory + this.adapter = await createDatabaseAdapter({ databasePath: this.databasePath }); + await this.adapter.initialize(); // Initialize schema - await this.initializeSchema(); - } catch (error) { - throw new Error( - 
`Database initialization failed: ${error instanceof Error ? error.message : "Unknown error"}`, - ); - } - } - - /** - * Create SQLite database connection with optimized settings - */ - private createConnection(): Promise { - return new Promise((resolve, reject) => { - const db = new sqlite3.Database(this.databasePath, (err) => { - if (err) { - reject(new Error(`Failed to connect to database: ${err.message}`)); - } else { - // Enable WAL mode for better concurrency - db.run('PRAGMA journal_mode = WAL;', (walErr) => { - if (walErr) { - console.warn('Failed to enable WAL mode:', walErr.message); - } - }); - - // Set performance optimizations - db.run('PRAGMA synchronous = NORMAL;'); // Balance between safety and speed - db.run('PRAGMA cache_size = -64000;'); // 64MB cache - db.run('PRAGMA temp_store = MEMORY;'); // Use memory for temp tables - db.run('PRAGMA mmap_size = 268435456;'); // 256MB memory-mapped I/O - - // Enable foreign keys - db.run('PRAGMA foreign_keys = ON;', (fkErr) => { - if (fkErr) { - reject(new Error(`Failed to enable foreign keys: ${fkErr.message}`)); - } else { - resolve(db); - } - }); - } - }); - }); - } - - /** - * Initialize database schema from SQL file - */ - private async initializeSchema(): Promise { - if (!this.db) { - throw new Error("Database connection not established"); - } - - try { - // Read and execute main schema file - const schemaPath = join(__dirname, "schema.sql"); - const schema = readFileSync(schemaPath, "utf-8"); - - // Split schema into statements - const statements = schema - .split(";") - .map((s) => s.trim()) - .filter((s) => s.length > 0); - - // Execute each statement separately to handle migration errors gracefully - for (const statement of statements) { - try { - await this.exec(statement); - } catch (error) { - // Ignore "duplicate column" errors from ALTER TABLE (migration already applied) - const errorMessage = error instanceof Error ? 
error.message : ""; - if (!errorMessage.includes("duplicate column")) { - throw error; - } - // Migration already applied, continue - } - } - - // Read and execute RBAC schema file - const rbacSchemaPath = join(__dirname, "rbac-schema.sql"); - if (existsSync(rbacSchemaPath)) { - const rbacSchema = readFileSync(rbacSchemaPath, "utf-8"); - - // Split RBAC schema into statements - const rbacStatements = rbacSchema - .split(";") - .map((s) => s.trim()) - .filter((s) => s.length > 0); - - // Execute each RBAC statement - for (const statement of rbacStatements) { - try { - await this.exec(statement); - } catch (error) { - // Ignore "duplicate column" errors from ALTER TABLE (migration already applied) - const errorMessage = error instanceof Error ? error.message : ""; - if (!errorMessage.includes("duplicate column")) { - throw error; - } - // Migration already applied, continue - } - } - } - - // Run migrations await this.runMigrations(); } catch (error) { throw new Error( - `Schema initialization failed: ${error instanceof Error ? error.message : "Unknown error"}`, + `Database initialization failed: ${error instanceof Error ? 
error.message : "Unknown error"}`, ); } } @@ -145,12 +43,12 @@ export class DatabaseService { * Run database migrations using the migration runner */ private async runMigrations(): Promise { - if (!this.db) { + if (!this.adapter) { throw new Error("Database connection not established"); } try { - const migrationRunner = new MigrationRunner(this.db); + const migrationRunner = new MigrationRunner(this.adapter); const appliedCount = await migrationRunner.runPendingMigrations(); if (appliedCount > 0) { @@ -164,110 +62,38 @@ export class DatabaseService { } /** - * Execute SQL statement - */ - private exec(sql: string): Promise { - return new Promise((resolve, reject) => { - if (!this.db) { - reject(new Error("Database connection not established")); - return; - } - - this.db.exec(sql, (err) => { - if (err) { - reject(err); - } else { - resolve(); - } - }); - }); - } - - /** - * Get database connection + * Get database adapter */ - public getConnection(): sqlite3.Database { - if (!this.db) { + public getAdapter(): DatabaseAdapter { + if (!this.adapter) { throw new Error("Database not initialized. Call initialize() first."); } - return this.db; + return this.adapter; } /** - * Prepare a SQL statement for reuse (improves performance for repeated queries) - * @param sql SQL statement with placeholders - * @returns Prepared statement + * Get database connection (backward-compatible alias for getAdapter) + * @deprecated Use getAdapter() instead */ - public prepareStatement(sql: string): sqlite3.Statement { - if (!this.db) { - throw new Error("Database not initialized. 
Call initialize() first."); - } - return this.db.prepare(sql); - } - - /** - * Execute a prepared statement - * @param statement Prepared statement - * @param params Parameters for the statement - * @returns Promise that resolves when execution completes - */ - public executePrepared( - statement: sqlite3.Statement, - params: unknown[] = [] - ): Promise { - return new Promise((resolve, reject) => { - statement.run(params, (err) => { - if (err) { - reject(err); - } else { - resolve(); - } - }); - }); - } - - /** - * Finalize a prepared statement to free resources - * @param statement Prepared statement to finalize - */ - public finalizeStatement(statement: sqlite3.Statement): Promise { - return new Promise((resolve, reject) => { - statement.finalize((err: Error | null) => { - if (err) { - reject(err); - } else { - resolve(); - } - }); - }); + public getConnection(): DatabaseAdapter { + return this.getAdapter(); } /** * Close database connection */ public async close(): Promise { - if (!this.db) { - return; + if (this.adapter) { + await this.adapter.close(); + this.adapter = null; } - - const dbToClose = this.db; - return new Promise((resolve, reject) => { - dbToClose.close((err) => { - if (err) { - reject(new Error(`Failed to close database: ${err.message}`)); - } else { - this.db = null; - resolve(); - } - }); - }); } /** * Check if database is initialized */ public isInitialized(): boolean { - return this.db !== null; + return this.adapter !== null; } /** @@ -277,11 +103,11 @@ export class DatabaseService { applied: { id: string; name: string; appliedAt: string }[]; pending: { id: string; filename: string }[]; }> { - if (!this.db) { + if (!this.adapter) { throw new Error("Database not initialized. 
Call initialize() first."); } - const migrationRunner = new MigrationRunner(this.db); + const migrationRunner = new MigrationRunner(this.adapter); return await migrationRunner.getStatus(); } } diff --git a/backend/src/database/ExecutionRepository.ts b/backend/src/database/ExecutionRepository.ts index ff32ff46..b6e75481 100644 --- a/backend/src/database/ExecutionRepository.ts +++ b/backend/src/database/ExecutionRepository.ts @@ -1,4 +1,4 @@ -import type sqlite3 from "sqlite3"; +import type { DatabaseAdapter } from "./DatabaseAdapter"; import { randomUUID } from "crypto"; /** @@ -118,9 +118,9 @@ export interface StatusCounts { * Repository for managing execution records in SQLite */ export class ExecutionRepository { - private db: sqlite3.Database; + private db: DatabaseAdapter; - constructor(db: sqlite3.Database) { + constructor(db: DatabaseAdapter) { this.db = db; } @@ -166,7 +166,7 @@ export class ExecutionRepository { ]; try { - await this.run(sql, params); + await this.db.execute(sql, params); return id; } catch (error) { throw new Error( @@ -223,7 +223,7 @@ export class ExecutionRepository { const sql = `UPDATE executions SET ${updateFields.join(", ")} WHERE id = ?`; try { - await this.run(sql, params); + await this.db.execute(sql, params); } catch (error) { // Provide detailed error information for debugging const errorMessage = error instanceof Error ? error.message : "Unknown error"; @@ -247,7 +247,7 @@ export class ExecutionRepository { const sql = "SELECT * FROM executions WHERE id = ?"; try { - const row = await this.get(sql, [id]); + const row = await this.db.queryOne(sql, [id]); return row ? 
this.mapRowToRecord(row) : null; } catch (error) { throw new Error( @@ -305,7 +305,7 @@ export class ExecutionRepository { params.push(pagination.pageSize, offset); try { - const rows = await this.all(sql, params); + const rows = await this.db.query(sql, params); return rows.map((row) => this.mapRowToRecord(row)); } catch (error) { throw new Error( @@ -328,7 +328,7 @@ export class ExecutionRepository { `; try { - const row = await this.get(sql, [executionId]); + const row = await this.db.queryOne(sql, [executionId]); return row ? this.mapRowToRecord(row) : null; } catch (error) { throw new Error( @@ -351,7 +351,7 @@ export class ExecutionRepository { `; try { - const rows = await this.all(sql, [originalExecutionId]); + const rows = await this.db.query(sql, [originalExecutionId]); return rows.map((row) => this.mapRowToRecord(row)); } catch (error) { throw new Error( @@ -404,7 +404,7 @@ export class ExecutionRepository { `; try { - const row = await this.get(sql, []); + const row = await this.db.queryOne(sql, []); return { total: row?.total ?? 0, running: row?.running ?? 
0, @@ -419,51 +419,6 @@ export class ExecutionRepository { } } - /** - * Execute SQL statement with parameters (INSERT, UPDATE, DELETE) - */ - private run(sql: string, params: unknown[]): Promise { - return new Promise((resolve, reject) => { - this.db.run(sql, params, (err) => { - if (err) { - reject(err); - } else { - resolve(); - } - }); - }); - } - - /** - * Get single row from database - */ - private get(sql: string, params: unknown[]): Promise { - return new Promise((resolve, reject) => { - this.db.get(sql, params, (err, row: DbRow | undefined) => { - if (err) { - reject(err); - } else { - resolve(row); - } - }); - }); - } - - /** - * Get all rows from database - */ - private all(sql: string, params: unknown[]): Promise { - return new Promise((resolve, reject) => { - this.db.all(sql, params, (err, rows) => { - if (err) { - reject(err); - } else { - resolve(rows as DbRow[]); - } - }); - }); - } - /** * Map database row to ExecutionRecord */ diff --git a/backend/src/database/MigrationRunner.ts b/backend/src/database/MigrationRunner.ts index 3d85b24f..5232938b 100644 --- a/backend/src/database/MigrationRunner.ts +++ b/backend/src/database/MigrationRunner.ts @@ -1,6 +1,6 @@ -import type sqlite3 from "sqlite3"; import { readFileSync, readdirSync } from "fs"; import { join } from "path"; +import type { DatabaseAdapter } from "./DatabaseAdapter"; /** * Migration metadata @@ -22,13 +22,15 @@ interface MigrationFile { /** * Database migration runner - * Tracks which migrations have been applied and runs pending migrations in order + * Tracks which migrations have been applied and runs pending migrations in order. + * Supports dialect-specific files (NNN_name.sqlite.sql, NNN_name.postgres.sql) + * and shared files (NNN_name.sql). 
*/ export class MigrationRunner { - private db: sqlite3.Database; + private db: DatabaseAdapter; private migrationsDir: string; - constructor(db: sqlite3.Database, migrationsDir?: string) { + constructor(db: DatabaseAdapter, migrationsDir?: string) { this.db = db; this.migrationsDir = migrationsDir ?? join(__dirname, "migrations"); } @@ -44,62 +46,90 @@ export class MigrationRunner { appliedAt TEXT NOT NULL ) `; - - return new Promise((resolve, reject) => { - this.db.run(createTableSQL, (err) => { - if (err) { - reject(new Error(`Failed to create migrations table: ${err.message}`)); - } else { - resolve(); - } - }); - }); + await this.db.execute(createTableSQL); } /** * Get list of applied migrations from database */ private async getAppliedMigrations(): Promise { - return new Promise((resolve, reject) => { - this.db.all( - "SELECT id, name, appliedAt FROM migrations ORDER BY id", - (err, rows: Migration[]) => { - if (err) { - reject(new Error(`Failed to fetch applied migrations: ${err.message}`)); - } else { - resolve(rows); - } - } - ); - }); + return this.db.query( + "SELECT id, name, appliedAt FROM migrations ORDER BY id" + ); } /** - * Get list of migration files from migrations directory + * Get list of migration files from migrations directory, filtered by dialect. + * + * Supports three filename patterns: + * - NNN_name.sql — shared (works for both dialects) + * - NNN_name.sqlite.sql — SQLite-specific + * - NNN_name.postgres.sql — PostgreSQL-specific + * + * If both a shared file and a dialect-specific file exist for the same ID, + * the dialect-specific file takes precedence. 
*/ private getMigrationFiles(): MigrationFile[] { + const dialect = this.db.getDialect(); + try { const files = readdirSync(this.migrationsDir); - return files - .filter(file => file.endsWith(".sql")) - .map(filename => { - // Extract migration ID from filename (e.g., "001_initial_rbac.sql" -> "001") - const match = /^(\d+)_(.+)\.sql$/.exec(filename); - if (!match) { - throw new Error(`Invalid migration filename format: ${filename}. Expected format: NNN_name.sql`); - } - - return { - id: match[1], - filename, - path: join(this.migrationsDir, filename) - }; - }) - .sort((a, b) => a.id.localeCompare(b.id)); + // Regex matches: NNN_name.sql, NNN_name.sqlite.sql, NNN_name.postgres.sql + const migrationRegex = /^(\d+)_(.+?)(?:\.(sqlite|postgres))?\.sql$/; + + // Collect candidates grouped by migration ID + const candidatesByID = new Map< + string, + { shared?: MigrationFile; dialectSpecific?: MigrationFile } + >(); + + for (const filename of files) { + if (!filename.endsWith(".sql")) continue; + + const match = migrationRegex.exec(filename); + if (!match) { + throw new Error( + `Invalid migration filename format: ${filename}.
Expected format: NNN_name.sql, NNN_name.sqlite.sql, or NNN_name.postgres.sql` + ); + } + + const id = match[1]; + const fileDialect = match[3] as "sqlite" | "postgres" | undefined; + + const migrationFile: MigrationFile = { + id, + filename, + path: join(this.migrationsDir, filename), + }; + + if (!candidatesByID.has(id)) { + candidatesByID.set(id, {}); + } + const entry = candidatesByID.get(id)!; + + if (fileDialect === undefined) { + // Shared file + entry.shared = migrationFile; + } else if (fileDialect === dialect) { + // Dialect-specific file matching the active dialect + entry.dialectSpecific = migrationFile; + } + // Files for the OTHER dialect are silently ignored + } + + // For each ID, prefer dialect-specific over shared + const result: MigrationFile[] = []; + for (const [, entry] of candidatesByID) { + const chosen = entry.dialectSpecific ?? entry.shared; + if (chosen) { + result.push(chosen); + } + } + + return result.sort((a, b) => a.id.localeCompare(b.id)); } catch (error) { if ((error as NodeJS.ErrnoException).code === "ENOENT") { - // Migrations directory doesn't exist, return empty array return []; } throw error; @@ -111,11 +141,9 @@ export class MigrationRunner { */ private async getPendingMigrations(): Promise { const appliedMigrations = await this.getAppliedMigrations(); - const appliedIds = new Set(appliedMigrations.map(m => m.id)); - + const appliedIds = new Set(appliedMigrations.map((m) => m.id)); const allMigrations = this.getMigrationFiles(); - - return allMigrations.filter(migration => !appliedIds.has(migration.id)); + return allMigrations.filter((migration) => !appliedIds.has(migration.id)); } /** @@ -123,32 +151,30 @@ export class MigrationRunner { */ private async executeMigration(migration: MigrationFile): Promise { try { - // Read migration file const sql = readFileSync(migration.path, "utf-8"); // Split into statements (handle multi-statement migrations) const statements = sql .split(";") - .map(s => s.trim()) - .filter(s => { - // 
Filter out empty statements and comment-only statements + .map((s) => s.trim()) + .filter((s) => { if (s.length === 0) return false; // Remove single-line comments and check if anything remains - const withoutComments = s.split('\n') - .map(line => line.replace(/--.*$/, '').trim()) - .filter(line => line.length > 0) - .join('\n'); + const withoutComments = s + .split("\n") + .map((line) => line.replace(/--.*$/, "").trim()) + .filter((line) => line.length > 0) + .join("\n"); return withoutComments.length > 0; }); - // Execute each statement + // Execute each statement individually via the adapter for (const statement of statements) { - await this.execStatement(statement); + await this.db.execute(statement); } // Record migration as applied await this.recordMigration(migration); - } catch (error) { throw new Error( `Failed to execute migration ${migration.filename}: ${error instanceof Error ? error.message : "Unknown error"}` @@ -156,40 +182,15 @@ export class MigrationRunner { } } - /** - * Execute a single SQL statement - */ - private execStatement(sql: string): Promise { - return new Promise((resolve, reject) => { - this.db.exec(sql, (err) => { - if (err) { - reject(err); - } else { - resolve(); - } - }); - }); - } - /** * Record a migration as applied in the migrations table */ private async recordMigration(migration: MigrationFile): Promise { const now = new Date().toISOString(); - - return new Promise((resolve, reject) => { - this.db.run( - "INSERT INTO migrations (id, name, appliedAt) VALUES (?, ?, ?)", - [migration.id, migration.filename, now], - (err) => { - if (err) { - reject(new Error(`Failed to record migration: ${err.message}`)); - } else { - resolve(); - } - } - ); - }); + await this.db.execute( + "INSERT INTO migrations (id, name, appliedAt) VALUES (?, ?, ?)", + [migration.id, migration.filename, now] + ); } /** @@ -198,23 +199,19 @@ export class MigrationRunner { */ public async runPendingMigrations(): Promise { try { - // Initialize migrations table 
if it doesn't exist await this.initializeMigrationsTable(); - // Get pending migrations const pendingMigrations = await this.getPendingMigrations(); if (pendingMigrations.length === 0) { return 0; } - // Execute each pending migration in order for (const migration of pendingMigrations) { await this.executeMigration(migration); } return pendingMigrations.length; - } catch (error) { throw new Error( `Migration failed: ${error instanceof Error ? error.message : "Unknown error"}` diff --git a/backend/src/database/PostgresAdapter.ts b/backend/src/database/PostgresAdapter.ts new file mode 100644 index 00000000..8cd93e11 --- /dev/null +++ b/backend/src/database/PostgresAdapter.ts @@ -0,0 +1,154 @@ +import pg from "pg"; +import type { DatabaseAdapter } from "./DatabaseAdapter"; +import { DatabaseQueryError, DatabaseConnectionError } from "./errors"; + +/** + * PostgresAdapter implementing DatabaseAdapter using the pg package. + */ +export class PostgresAdapter implements DatabaseAdapter { + private _databaseUrl: string; + private _pool: pg.Pool | null = null; + private _txClient: pg.PoolClient | null = null; + private _connected = false; + + constructor(databaseUrl: string) { + this._databaseUrl = databaseUrl; + } + + async initialize(): Promise { + this._pool = new pg.Pool({ connectionString: this._databaseUrl }); + try { + await this._pool.query("SELECT 1"); + this._connected = true; + } catch (err: unknown) { + const message = + err instanceof Error ? err.message : "Unknown connection error"; + await this._pool.end().catch(() => {}); + this._pool = null; + throw new DatabaseConnectionError( + `Failed to connect to PostgreSQL: ${message}`, + this._databaseUrl, + ); + } + } + + async close(): Promise { + if (this._pool) { + await this._pool.end(); + this._pool = null; + } + this._txClient = null; + this._connected = false; + } + + async query(sql: string, params?: unknown[]): Promise { + const client = this._txClient ?? 
this._pool; + if (!client) { + throw new DatabaseQueryError("Database not connected", sql, params); + } + try { + const result = await client.query(sql, params); + return result.rows as T[]; + } catch (err: unknown) { + const message = err instanceof Error ? err.message : "Query failed"; + throw new DatabaseQueryError(message, sql, params); + } + } + + async queryOne(sql: string, params?: unknown[]): Promise { + const client = this._txClient ?? this._pool; + if (!client) { + throw new DatabaseQueryError("Database not connected", sql, params); + } + try { + const result = await client.query(sql, params); + return (result.rows[0] as T) ?? null; + } catch (err: unknown) { + const message = err instanceof Error ? err.message : "Query failed"; + throw new DatabaseQueryError(message, sql, params); + } + } + + async execute(sql: string, params?: unknown[]): Promise<{ changes: number }> { + const client = this._txClient ?? this._pool; + if (!client) { + throw new DatabaseQueryError("Database not connected", sql, params); + } + try { + const result = await client.query(sql, params); + return { changes: result.rowCount ?? 0 }; + } catch (err: unknown) { + const message = err instanceof Error ? 
err.message : "Query failed"; + throw new DatabaseQueryError(message, sql, params); + } + } + + async beginTransaction(): Promise { + if (!this._pool) { + throw new DatabaseQueryError("Database not connected", "BEGIN", []); + } + this._txClient = await this._pool.connect(); + await this._txClient.query("BEGIN"); + } + + async commit(): Promise { + if (!this._txClient) { + throw new Error("No active transaction to commit"); + } + try { + await this._txClient.query("COMMIT"); + } finally { + this._txClient.release(); + this._txClient = null; + } + } + + async rollback(): Promise { + if (!this._txClient) { + throw new Error("No active transaction to rollback"); + } + try { + await this._txClient.query("ROLLBACK"); + } finally { + this._txClient.release(); + this._txClient = null; + } + } + + async withTransaction(fn: () => Promise): Promise { + if (!this._pool) { + throw new DatabaseQueryError( + "Database not connected", + "BEGIN TRANSACTION", + [], + ); + } + const client = await this._pool.connect(); + const previousClient = this._txClient; + this._txClient = client; + try { + await client.query("BEGIN"); + const result = await fn(); + await client.query("COMMIT"); + return result; + } catch (error) { + await client.query("ROLLBACK"); + throw error; + } finally { + client.release(); + this._txClient = previousClient; + } + } + + isConnected(): boolean { + return this._connected; + } + + getDialect(): "sqlite" | "postgres" { + return "postgres"; + } + + getPlaceholder(index: number): string { + return "$" + String(index); + } +} diff --git a/backend/src/database/SQLiteAdapter.ts b/backend/src/database/SQLiteAdapter.ts new file mode 100644 index 00000000..4ff97f2b --- /dev/null +++ b/backend/src/database/SQLiteAdapter.ts @@ -0,0 +1,186 @@ +import sqlite3 from "sqlite3"; +import { dirname } from "path"; +import { mkdirSync } from "fs"; +import type { DatabaseAdapter } from "./DatabaseAdapter"; +import { DatabaseQueryError, DatabaseConnectionError } from "./errors"; + 
+/** + * SQLiteAdapter implementing DatabaseAdapter using the sqlite3 package. + */ +export class SQLiteAdapter implements DatabaseAdapter { + private _databasePath: string; + private _db: sqlite3.Database | null = null; + private _connected = false; + private _inTransaction = false; + + constructor(databasePath: string) { + this._databasePath = databasePath; + } + + async initialize(): Promise { + if (this._databasePath !== ":memory:") { + const dir = dirname(this._databasePath); + mkdirSync(dir, { recursive: true }); + } + + return new Promise((resolve, reject) => { + const db = new sqlite3.Database(this._databasePath, (err) => { + if (err) { + reject( + new DatabaseConnectionError( + `Failed to open SQLite database: ${err.message}`, + this._databasePath, + ), + ); + return; + } + + db.run("PRAGMA journal_mode = WAL;", (walErr) => { + if (walErr) { + reject( + new DatabaseConnectionError( + `Failed to enable WAL mode: ${walErr.message}`, + this._databasePath, + ), + ); + return; + } + + db.run("PRAGMA foreign_keys = ON;", (fkErr) => { + if (fkErr) { + reject( + new DatabaseConnectionError( + `Failed to enable foreign keys: ${fkErr.message}`, + this._databasePath, + ), + ); + return; + } + + this._db = db; + this._connected = true; + resolve(); + }); + }); + }); + }); + } + + async close(): Promise { + if (!this._db) { + return; + } + + const db = this._db; + return new Promise((resolve, reject) => { + db.close((err) => { + if (err) { + reject( + new DatabaseConnectionError( + `Failed to close SQLite database: ${err.message}`, + this._databasePath, + ), + ); + return; + } + this._db = null; + this._connected = false; + this._inTransaction = false; + resolve(); + }); + }); + } + + query(sql: string, params?: unknown[]): Promise { + if (!this._db) { + throw new DatabaseQueryError("Database not connected", sql, params); + } + + const db = this._db; + return new Promise((resolve, reject) => { + db.all(sql, params ?? 
[], (err, rows) => { + if (err) { + reject(new DatabaseQueryError(err.message, sql, params)); + return; + } + resolve((rows ?? []) as T[]); + }); + }); + } + + queryOne(sql: string, params?: unknown[]): Promise { + if (!this._db) { + throw new DatabaseQueryError("Database not connected", sql, params); + } + + const db = this._db; + return new Promise((resolve, reject) => { + db.get(sql, params ?? [], (err, row) => { + if (err) { + reject(new DatabaseQueryError(err.message, sql, params)); + return; + } + resolve((row as T) ?? null); + }); + }); + } + + execute(sql: string, params?: unknown[]): Promise<{ changes: number }> { + if (!this._db) { + throw new DatabaseQueryError("Database not connected", sql, params); + } + + const db = this._db; + return new Promise<{ changes: number }>((resolve, reject) => { + db.run(sql, params ?? [], function (err) { + if (err) { + reject(new DatabaseQueryError(err.message, sql, params)); + return; + } + resolve({ changes: this.changes }); + }); + }); + } + + async beginTransaction(): Promise { + if (this._inTransaction) { + throw new Error("Nested transactions are not supported in SQLite"); + } + await this.execute("BEGIN TRANSACTION"); + this._inTransaction = true; + } + + async commit(): Promise { + await this.execute("COMMIT"); + this._inTransaction = false; + } + + async rollback(): Promise { + await this.execute("ROLLBACK"); + this._inTransaction = false; + } + + async withTransaction(fn: () => Promise): Promise { + await this.beginTransaction(); + try { + const result = await fn(); + await this.commit(); + return result; + } catch (error) { + await this.rollback(); + throw error; + } + } + + isConnected(): boolean { + return this._connected; + } + + getDialect(): "sqlite" | "postgres" { + return "sqlite"; + } + + getPlaceholder(_index: number): string { + return "?"; + } +} diff --git a/backend/src/database/audit-schema.sql b/backend/src/database/audit-schema.sql deleted file mode 100644 index 7661d731..00000000 --- 
a/backend/src/database/audit-schema.sql +++ /dev/null @@ -1,33 +0,0 @@ --- Audit Logging Schema --- Comprehensive audit trail for security monitoring and compliance --- Logs authentication, authorization, and administrative actions - --- Audit logs table: Records all security-relevant events -CREATE TABLE IF NOT EXISTS audit_logs ( - id TEXT PRIMARY KEY, -- UUID - timestamp TEXT NOT NULL, -- ISO 8601 timestamp - eventType TEXT NOT NULL, -- Event category: 'auth', 'authz', 'admin', 'user', 'role', 'permission' - "action" TEXT NOT NULL, -- Specific action: 'login_success', 'login_failure', 'permission_denied', etc. - userId TEXT, -- User who performed the action (NULL for failed login attempts) - targetUserId TEXT, -- User affected by the action (for admin operations) - targetResourceType TEXT, -- Type of resource affected: 'user', 'role', 'group', 'permission' - targetResourceId TEXT, -- ID of the affected resource - ipAddress TEXT, -- Source IP address - userAgent TEXT, -- User agent string - details TEXT, -- JSON string with additional context - result TEXT NOT NULL, -- Result: 'success', 'failure', 'denied' - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE SET NULL, - FOREIGN KEY (targetUserId) REFERENCES users(id) ON DELETE SET NULL -); - --- Performance indexes for audit log queries -CREATE INDEX IF NOT EXISTS idx_audit_logs_timestamp ON audit_logs(timestamp); -CREATE INDEX IF NOT EXISTS idx_audit_logs_event_type ON audit_logs(eventType); -CREATE INDEX IF NOT EXISTS idx_audit_logs_user_id ON audit_logs(userId); -CREATE INDEX IF NOT EXISTS idx_audit_logs_target_user_id ON audit_logs(targetUserId); -CREATE INDEX IF NOT EXISTS idx_audit_logs_action ON audit_logs("action"); -CREATE INDEX IF NOT EXISTS idx_audit_logs_result ON audit_logs(result); -CREATE INDEX IF NOT EXISTS idx_audit_logs_ip_address ON audit_logs(ipAddress); - --- Composite index for common queries (user activity over time) -CREATE INDEX IF NOT EXISTS idx_audit_logs_user_timestamp ON 
audit_logs(userId, timestamp); diff --git a/backend/src/database/errors.ts b/backend/src/database/errors.ts new file mode 100644 index 00000000..b0cd5242 --- /dev/null +++ b/backend/src/database/errors.ts @@ -0,0 +1,27 @@ +/** + * Error thrown when a database query fails. + */ +export class DatabaseQueryError extends Error { + public readonly query: string; + public readonly params: unknown[] | undefined; + + constructor(message: string, query: string, params?: unknown[]) { + super(message); + this.name = "DatabaseQueryError"; + this.query = query; + this.params = params; + } +} + +/** + * Error thrown when a database connection fails. + */ +export class DatabaseConnectionError extends Error { + public readonly connectionDetails: string; + + constructor(message: string, connectionDetails: string) { + super(message); + this.name = "DatabaseConnectionError"; + this.connectionDetails = connectionDetails; + } +} diff --git a/backend/src/database/migrations.sql b/backend/src/database/migrations.sql deleted file mode 100644 index 52594db8..00000000 --- a/backend/src/database/migrations.sql +++ /dev/null @@ -1,33 +0,0 @@ --- Migration: Add command and expert_mode columns to executions table --- These columns were added to support command execution tracking and expert mode flag - --- Add command column if it doesn't exist -ALTER TABLE executions ADD COLUMN command TEXT; - --- Add expert_mode column if it doesn't exist -ALTER TABLE executions ADD COLUMN expert_mode INTEGER DEFAULT 0; - --- Migration: Add re-execution tracking fields --- These columns support linking re-executed actions to their original executions - --- Add original_execution_id column if it doesn't exist -ALTER TABLE executions ADD COLUMN original_execution_id TEXT; - --- Add re_execution_count column if it doesn't exist -ALTER TABLE executions ADD COLUMN re_execution_count INTEGER DEFAULT 0; - --- Create index for finding re-executions by original execution ID -CREATE INDEX IF NOT EXISTS 
idx_executions_original_id ON executions(original_execution_id); - --- Migration: Add stdout and stderr columns for expert mode complete output capture --- These columns store the full command output when expert mode is enabled - --- Add stdout column if it doesn't exist -ALTER TABLE executions ADD COLUMN stdout TEXT; - --- Add stderr column if it doesn't exist -ALTER TABLE executions ADD COLUMN stderr TEXT; - --- Migration: Add execution_tool column to indicate which execution engine was used --- Values: bolt, ansible -ALTER TABLE executions ADD COLUMN execution_tool TEXT DEFAULT 'bolt'; diff --git a/backend/src/database/schema.sql b/backend/src/database/migrations/000_initial_schema.sql similarity index 96% rename from backend/src/database/schema.sql rename to backend/src/database/migrations/000_initial_schema.sql index 1a262e7f..2fd968ce 100644 --- a/backend/src/database/schema.sql +++ b/backend/src/database/migrations/000_initial_schema.sql @@ -1,3 +1,7 @@ +-- Migration 000: Initial Schema +-- Creates the base executions table and revoked_tokens table +-- This is the foundation schema for the Pabawi application + -- Executions table for storing command and task execution history CREATE TABLE IF NOT EXISTS executions ( id TEXT PRIMARY KEY, diff --git a/backend/src/database/migrations/007_permissions_and_provisioner_role.sql b/backend/src/database/migrations/007_permissions_and_provisioner_role.sql new file mode 100644 index 00000000..22e650dc --- /dev/null +++ b/backend/src/database/migrations/007_permissions_and_provisioner_role.sql @@ -0,0 +1,104 @@ +-- Migration: 007_new_permissions_and_provisioner_role +-- Description: Seed new fine-grained permissions for proxmox, aws, journal, and +-- integration_config resources. Create the Provisioner built-in role +-- and assign all new permissions to the Administrator role. 
+-- Date: 2025-01-20 +-- Requirements: 27.1, 27.2, 27.3, 27.4, 27.5, 28.1, 28.2, 28.3, 28.4, 28.5, 29.2, 29.3, 29.4 + +-- ============================================================================ +-- PERMISSIONS: New fine-grained permissions for 1.0.0 features +-- ============================================================================ + +-- Proxmox permissions (enhanced granularity for single plugin) +INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES + ('proxmox-read-001', 'proxmox', 'read', 'View Proxmox VMs and containers', datetime('now')), + ('proxmox-lifecycle-001', 'proxmox', 'lifecycle', 'Start/stop/reboot VMs and containers', datetime('now')), + ('proxmox-provision-001', 'proxmox', 'provision', 'Create new VMs and containers', datetime('now')), + ('proxmox-destroy-001', 'proxmox', 'destroy', 'Destroy/decommission VMs and containers', datetime('now')), + ('proxmox-admin-001', 'proxmox', 'admin', 'Full Proxmox management', datetime('now')); + +-- AWS permissions (single plugin, EC2 initially) +INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES + ('aws-read-001', 'aws', 'read', 'View AWS resources', datetime('now')), + ('aws-lifecycle-001', 'aws', 'lifecycle', 'Start/stop/reboot AWS instances', datetime('now')), + ('aws-provision-001', 'aws', 'provision', 'Launch new AWS resources', datetime('now')), + ('aws-destroy-001', 'aws', 'destroy', 'Terminate AWS resources', datetime('now')), + ('aws-admin-001', 'aws', 'admin', 'Full AWS management', datetime('now')); + +-- Journal permissions +INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES + ('journal-read-001', 'journal', 'read', 'View journal entries', datetime('now')), + ('journal-note-001', 'journal', 'note', 'Add manual notes', datetime('now')), + ('journal-admin-001', 'journal', 'admin', 'Manage journal entries', datetime('now')); + +-- Integration config permissions +INSERT INTO permissions (id, resource, 
"action", description, createdAt) VALUES + ('integration_config-read-001', 'integration_config', 'read', 'View integration configs', datetime('now')), + ('integration_config-configure-001', 'integration_config', 'configure', 'Modify integration configs', datetime('now')), + ('integration_config-admin-001', 'integration_config', 'admin', 'Full config management', datetime('now')); + +-- ============================================================================ +-- ROLES: Create Provisioner built-in role +-- ============================================================================ + +INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES + ('role-provisioner-001', 'Provisioner', 'Provision and manage infrastructure resources', 1, datetime('now'), datetime('now')); + +-- ============================================================================ +-- ROLE-PERMISSION ASSIGNMENTS: Provisioner role +-- ============================================================================ + +-- Provisioner: read, provision, destroy, lifecycle for proxmox +INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES + ('role-provisioner-001', 'proxmox-read-001', datetime('now')), + ('role-provisioner-001', 'proxmox-provision-001', datetime('now')), + ('role-provisioner-001', 'proxmox-destroy-001', datetime('now')), + ('role-provisioner-001', 'proxmox-lifecycle-001', datetime('now')); + +-- Provisioner: read, provision, destroy, lifecycle for aws +INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES + ('role-provisioner-001', 'aws-read-001', datetime('now')), + ('role-provisioner-001', 'aws-provision-001', datetime('now')), + ('role-provisioner-001', 'aws-destroy-001', datetime('now')), + ('role-provisioner-001', 'aws-lifecycle-001', datetime('now')); + +-- Provisioner: read, note for journal +INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES + ('role-provisioner-001', 'journal-read-001', 
datetime('now')), + ('role-provisioner-001', 'journal-note-001', datetime('now')); + +-- Provisioner: read for integration_config +INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES + ('role-provisioner-001', 'integration_config-read-001', datetime('now')); + +-- ============================================================================ +-- ROLE-PERMISSION ASSIGNMENTS: Administrator role — all new permissions +-- ============================================================================ + +-- Administrator: all proxmox permissions +INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES + ('role-admin-001', 'proxmox-read-001', datetime('now')), + ('role-admin-001', 'proxmox-lifecycle-001', datetime('now')), + ('role-admin-001', 'proxmox-provision-001', datetime('now')), + ('role-admin-001', 'proxmox-destroy-001', datetime('now')), + ('role-admin-001', 'proxmox-admin-001', datetime('now')); + +-- Administrator: all aws permissions +INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES + ('role-admin-001', 'aws-read-001', datetime('now')), + ('role-admin-001', 'aws-lifecycle-001', datetime('now')), + ('role-admin-001', 'aws-provision-001', datetime('now')), + ('role-admin-001', 'aws-destroy-001', datetime('now')), + ('role-admin-001', 'aws-admin-001', datetime('now')); + +-- Administrator: all journal permissions +INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES + ('role-admin-001', 'journal-read-001', datetime('now')), + ('role-admin-001', 'journal-note-001', datetime('now')), + ('role-admin-001', 'journal-admin-001', datetime('now')); + +-- Administrator: all integration_config permissions +INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES + ('role-admin-001', 'integration_config-read-001', datetime('now')), + ('role-admin-001', 'integration_config-configure-001', datetime('now')), + ('role-admin-001', 'integration_config-admin-001', datetime('now')); diff --git 
a/backend/src/database/migrations/008_journal_entries.sql b/backend/src/database/migrations/008_journal_entries.sql new file mode 100644 index 00000000..7e971e3e --- /dev/null +++ b/backend/src/database/migrations/008_journal_entries.sql @@ -0,0 +1,34 @@ +-- Migration 008: Journal Entries +-- Add journal_entries table for tracking provisioning events, lifecycle actions, +-- execution results, and manual notes per inventory node. +-- Requirements: 25.1, 25.2, 25.3, 26.1, 26.2, 26.3, 26.4 + +-- Journal entries table: Records all node-related events +CREATE TABLE IF NOT EXISTS journal_entries ( + id TEXT PRIMARY KEY, + nodeId TEXT NOT NULL, + nodeUri TEXT NOT NULL, + eventType TEXT NOT NULL CHECK (eventType IN ( + 'provision', 'destroy', 'start', 'stop', 'reboot', 'suspend', 'resume', + 'command_execution', 'task_execution', 'puppet_run', 'package_install', + 'config_change', 'note', 'error', 'warning', 'info' + )), + source TEXT NOT NULL CHECK (source IN ( + 'proxmox', 'aws', 'bolt', 'ansible', 'ssh', 'puppetdb', 'user', 'system' + )), + "action" TEXT NOT NULL, + summary TEXT NOT NULL, + details TEXT, -- JSON + userId TEXT, + timestamp TEXT NOT NULL, + FOREIGN KEY (userId) REFERENCES users(id) ON DELETE SET NULL +); + +-- Performance indexes for journal queries +CREATE INDEX IF NOT EXISTS idx_journal_node ON journal_entries(nodeId); +CREATE INDEX IF NOT EXISTS idx_journal_timestamp ON journal_entries(timestamp DESC); +CREATE INDEX IF NOT EXISTS idx_journal_type ON journal_entries(eventType); +CREATE INDEX IF NOT EXISTS idx_journal_source ON journal_entries(source); + +-- Composite index for node timeline queries (nodeId + timestamp descending) +CREATE INDEX IF NOT EXISTS idx_journal_node_time ON journal_entries(nodeId, timestamp DESC); diff --git a/backend/src/database/migrations/009_integration_configs.sql b/backend/src/database/migrations/009_integration_configs.sql new file mode 100644 index 00000000..c1eae06c --- /dev/null +++ 
b/backend/src/database/migrations/009_integration_configs.sql @@ -0,0 +1,22 @@ +-- Migration 009: Integration Configs +-- Add integration_configs table for storing per-user integration configurations +-- with encrypted sensitive fields and unique constraint per user/integration. +-- Requirements: 32.1, 32.2, 32.3, 32.4 + +-- Integration configs table: Stores per-user integration settings +CREATE TABLE IF NOT EXISTS integration_configs ( + id TEXT PRIMARY KEY, + userId TEXT NOT NULL, + integrationName TEXT NOT NULL, + config TEXT NOT NULL, -- JSON, sensitive fields encrypted + isActive INTEGER NOT NULL DEFAULT 1, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL, + FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, + UNIQUE(userId, integrationName) +); + +-- Performance indexes for integration config queries +CREATE INDEX IF NOT EXISTS idx_integration_configs_user ON integration_configs(userId); +CREATE INDEX IF NOT EXISTS idx_integration_configs_name ON integration_configs(integrationName); +CREATE INDEX IF NOT EXISTS idx_integration_configs_active ON integration_configs(isActive); diff --git a/backend/src/database/rbac-schema.sql b/backend/src/database/rbac-schema.sql deleted file mode 100644 index d31c8ede..00000000 --- a/backend/src/database/rbac-schema.sql +++ /dev/null @@ -1,145 +0,0 @@ --- RBAC Authorization System Database Schema --- This schema implements Role-Based Access Control with users, groups, roles, and permissions --- All IDs are UUIDs, timestamps are ISO 8601 format - --- Users table: Core user accounts with authentication credentials -CREATE TABLE IF NOT EXISTS users ( - id TEXT PRIMARY KEY, -- UUID - username TEXT NOT NULL UNIQUE, - email TEXT NOT NULL UNIQUE, - passwordHash TEXT NOT NULL, -- bcrypt hash - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER NOT NULL DEFAULT 1, -- Boolean: 1 = active, 0 = inactive - isAdmin INTEGER NOT NULL DEFAULT 0, -- Boolean: 1 = admin, 0 = regular user - createdAt TEXT NOT 
NULL, -- ISO 8601 timestamp - updatedAt TEXT NOT NULL, -- ISO 8601 timestamp - lastLoginAt TEXT -- ISO 8601 timestamp, NULL if never logged in -); - --- Groups table: Collections of users for permission management -CREATE TABLE IF NOT EXISTS groups ( - id TEXT PRIMARY KEY, -- UUID - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - createdAt TEXT NOT NULL, -- ISO 8601 timestamp - updatedAt TEXT NOT NULL -- ISO 8601 timestamp -); - --- Roles table: Named sets of permissions -CREATE TABLE IF NOT EXISTS roles ( - id TEXT PRIMARY KEY, -- UUID - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - isBuiltIn INTEGER NOT NULL DEFAULT 0, -- Boolean: 1 = system role (protected), 0 = custom role - createdAt TEXT NOT NULL, -- ISO 8601 timestamp - updatedAt TEXT NOT NULL -- ISO 8601 timestamp -); - --- Permissions table: Specific resource-action authorizations -CREATE TABLE IF NOT EXISTS permissions ( - id TEXT PRIMARY KEY, -- UUID - resource TEXT NOT NULL, -- Resource identifier (e.g., 'ansible', 'bolt', 'puppetdb') - "action" TEXT NOT NULL, -- Action identifier (e.g., 'read', 'write', 'execute', 'admin') - description TEXT NOT NULL, - createdAt TEXT NOT NULL, -- ISO 8601 timestamp - UNIQUE(resource, "action") -- Each resource-action combination must be unique -); - --- User-Group junction table: Many-to-many relationship between users and groups -CREATE TABLE IF NOT EXISTS user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, -- ISO 8601 timestamp - PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE -); - --- User-Role junction table: Direct role assignments to users -CREATE TABLE IF NOT EXISTS user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, -- ISO 8601 timestamp - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) 
REFERENCES roles(id) ON DELETE CASCADE -); - --- Group-Role junction table: Role assignments to groups -CREATE TABLE IF NOT EXISTS group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, -- ISO 8601 timestamp - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE -); - --- Role-Permission junction table: Permission assignments to roles -CREATE TABLE IF NOT EXISTS role_permissions ( - roleId TEXT NOT NULL, - permissionId TEXT NOT NULL, - assignedAt TEXT NOT NULL, -- ISO 8601 timestamp - PRIMARY KEY (roleId, permissionId), - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, - FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE -); - --- Revoked tokens table: JWT token revocation list for logout and security -CREATE TABLE IF NOT EXISTS revoked_tokens ( - token TEXT PRIMARY KEY, -- Hashed JWT token - userId TEXT NOT NULL, - revokedAt TEXT NOT NULL, -- ISO 8601 timestamp - expiresAt TEXT NOT NULL, -- ISO 8601 timestamp (token expiration) - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE -); - --- Performance Indexes --- User lookups by username and email (authentication) -CREATE INDEX IF NOT EXISTS idx_users_username ON users(username); -CREATE INDEX IF NOT EXISTS idx_users_email ON users(email); -CREATE INDEX IF NOT EXISTS idx_users_active ON users(isActive); - --- Permission check optimization: Direct user-role path -CREATE INDEX IF NOT EXISTS idx_user_roles_user ON user_roles(userId); -CREATE INDEX IF NOT EXISTS idx_user_roles_role ON user_roles(roleId); - --- Permission check optimization: User-group-role path -CREATE INDEX IF NOT EXISTS idx_user_groups_user ON user_groups(userId); -CREATE INDEX IF NOT EXISTS idx_user_groups_group ON user_groups(groupId); -CREATE INDEX IF NOT EXISTS idx_group_roles_group ON group_roles(groupId); -CREATE INDEX IF NOT EXISTS idx_group_roles_role ON 
group_roles(roleId); - --- Permission check optimization: Role-permission lookup -CREATE INDEX IF NOT EXISTS idx_role_permissions_role ON role_permissions(roleId); -CREATE INDEX IF NOT EXISTS idx_role_permissions_perm ON role_permissions(permissionId); - --- Permission lookups by resource and action -CREATE INDEX IF NOT EXISTS idx_permissions_resource_action ON permissions(resource, "action"); - --- Token revocation checks -CREATE INDEX IF NOT EXISTS idx_revoked_tokens_token ON revoked_tokens(token); -CREATE INDEX IF NOT EXISTS idx_revoked_tokens_expires ON revoked_tokens(expiresAt); -CREATE INDEX IF NOT EXISTS idx_revoked_tokens_user ON revoked_tokens(userId); - --- Composite indexes for optimized permission checks --- Composite index for direct user-role-permission path lookup -CREATE INDEX IF NOT EXISTS idx_user_roles_composite ON user_roles(userId, roleId); - --- Composite index for user-group-role path lookup -CREATE INDEX IF NOT EXISTS idx_user_groups_composite ON user_groups(userId, groupId); -CREATE INDEX IF NOT EXISTS idx_group_roles_composite ON group_roles(groupId, roleId); - --- Composite index for role-permission lookup -CREATE INDEX IF NOT EXISTS idx_role_permissions_composite ON role_permissions(roleId, permissionId); - --- Configuration table: Application settings and setup configuration -CREATE TABLE IF NOT EXISTS config ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL, - updatedAt TEXT NOT NULL -- ISO 8601 timestamp -); - --- Index for config lookups -CREATE INDEX IF NOT EXISTS idx_config_key ON config(key); diff --git a/backend/src/integrations/IntegrationManager.ts b/backend/src/integrations/IntegrationManager.ts index c4b5721b..fc3eabb8 100644 --- a/backend/src/integrations/IntegrationManager.ts +++ b/backend/src/integrations/IntegrationManager.ts @@ -32,7 +32,7 @@ export interface HealthCheckCacheEntry { * Aggregated inventory from multiple sources */ export interface AggregatedInventory { - nodes: Node[]; + nodes: LinkedNode[]; /** Groups 
aggregated from all sources */ groups: NodeGroup[]; sources: Record< @@ -236,6 +236,75 @@ export class IntegrationManager { return Array.from(this.plugins.values()); } + /** + * Get provisioning capabilities from all execution tools + * + * Queries all execution tool plugins that support provisioning capabilities + * and aggregates them into a single list with source attribution. + * + * @returns Array of provisioning capabilities from all plugins + */ + getAllProvisioningCapabilities(): Array<{ + source: string; + capabilities: Array<{ + name: string; + description: string; + operation: "create" | "destroy"; + parameters: Array<{ + name: string; + type: string; + required: boolean; + default?: unknown; + }>; + }>; + }> { + const result: Array<{ + source: string; + capabilities: Array<{ + name: string; + description: string; + operation: "create" | "destroy"; + parameters: Array<{ + name: string; + type: string; + required: boolean; + default?: unknown; + }>; + }>; + }> = []; + + for (const [name, tool] of this.executionTools) { + // Check if the plugin has listProvisioningCapabilities method + if ( + "listProvisioningCapabilities" in tool && + typeof tool.listProvisioningCapabilities === "function" + ) { + try { + const capabilities = tool.listProvisioningCapabilities(); + if (capabilities && capabilities.length > 0) { + result.push({ + source: name, + capabilities, + }); + } + } catch (error) { + const err = error instanceof Error ? 
error : new Error(String(error)); + this.logger.error( + `Failed to get provisioning capabilities from '${name}'`, + { + component: "IntegrationManager", + operation: "getAllProvisioningCapabilities", + metadata: { sourceName: name }, + }, + err + ); + } + } + } + + return result; + } + /** * Execute an action using the specified execution tool * @@ -273,14 +342,12 @@ export class IntegrationManager { nodes: LinkedNode[]; sources: AggregatedInventory["sources"]; }> { - // Get aggregated inventory + // getAggregatedInventory already deduplicates and links nodes via deduplicateNodes → linkNodes. + // The returned nodes are already LinkedNode[] (with sources, sourceData, etc.). const aggregated = await this.getAggregatedInventory(); - // Link nodes across sources - const linkedNodes = this.nodeLinkingService.linkNodes(aggregated.nodes); - return { - nodes: linkedNodes, + nodes: aggregated.nodes as LinkedNode[], sources: aggregated.sources, }; } @@ -372,24 +439,37 @@ export class IntegrationManager { return { nodes: [], groups: [] }; } - // Fetch nodes and groups in parallel + // Fetch nodes and groups in parallel with per-source timeout + // Prevents a single slow source from blocking the entire inventory + const SOURCE_TIMEOUT_MS = 15_000; + this.logger.debug(`Calling getInventory() and getGroups() on source '${name}'`, { component: "IntegrationManager", operation: "getAggregatedInventory", metadata: { sourceName: name }, }); - const [nodes, groups] = await Promise.all([ - source.getInventory(), - source.getGroups().catch((error: unknown) => { - const err = error instanceof Error ? 
error : new Error(String(error)); - this.logger.error(`Failed to get groups from '${name}', continuing with nodes only`, { - component: "IntegrationManager", - operation: "getAggregatedInventory", - metadata: { sourceName: name }, - }, err); - return []; - }), + const timeoutPromise = new Promise((_, reject) => + setTimeout( + () => reject(new Error(`Source '${name}' timed out after ${String(SOURCE_TIMEOUT_MS)}ms`)), + SOURCE_TIMEOUT_MS, + ), + ); + + const [nodes, groups] = await Promise.race([ + Promise.all([ + source.getInventory(), + source.getGroups().catch((error: unknown) => { + const err = error instanceof Error ? error : new Error(String(error)); + this.logger.error(`Failed to get groups from '${name}', continuing with nodes only`, { + component: "IntegrationManager", + operation: "getAggregatedInventory", + metadata: { sourceName: name }, + }, err); + return [] as NodeGroup[]; + }), + ]), + timeoutPromise, ]); this.logger.debug(`Source '${name}' returned ${String(nodes.length)} nodes and ${String(groups.length)} groups`, { @@ -843,41 +923,19 @@ export class IntegrationManager { return true; } - /** - * Deduplicate nodes by ID, preferring nodes from higher priority sources + /** + * Deduplicate and link nodes by matching identifiers. + * + * When multiple sources provide the same node (matched by identifiers like certname, + * hostname, or URI), merge them into a single node entry with all sources tracked. + * The node data is taken from the highest priority source, but all sources and URIs + * are recorded in sourceData. 
* - * @param nodes - Array of nodes potentially with duplicates - * @returns Deduplicated array of nodes + * @param nodes - Array of nodes from all sources + * @returns Deduplicated and linked array of nodes with source attribution */ - private deduplicateNodes(nodes: Node[]): Node[] { - const nodeMap = new Map(); - - for (const node of nodes) { - const existing = nodeMap.get(node.id); - - if (!existing) { - nodeMap.set(node.id, node); - continue; - } - - // Get priority for both nodes - const existingSource = (existing as Node & { source?: string }).source; - const newSource = (node as Node & { source?: string }).source; - - const existingPriority = existingSource - ? (this.plugins.get(existingSource)?.config.priority ?? 0) - : 0; - const newPriority = newSource - ? (this.plugins.get(newSource)?.config.priority ?? 0) - : 0; - - // Keep node from higher priority source - if (newPriority > existingPriority) { - nodeMap.set(node.id, node); - } - } - - return Array.from(nodeMap.values()); + private deduplicateNodes(nodes: Node[]): LinkedNode[] { + return this.nodeLinkingService.linkNodes(nodes); } /** diff --git a/backend/src/integrations/NodeLinkingService.ts b/backend/src/integrations/NodeLinkingService.ts index 6f95cbe9..1a62f59c 100644 --- a/backend/src/integrations/NodeLinkingService.ts +++ b/backend/src/integrations/NodeLinkingService.ts @@ -10,13 +10,27 @@ import type { IntegrationManager } from "./IntegrationManager"; import { LoggerService } from "../services/LoggerService"; /** - * Linked node with source attribution + * Source-specific node data + */ +export interface SourceNodeData { + id: string; + uri: string; + config?: Record; + metadata?: Record; + status?: string; +} + +/** + * Linked node with source attribution and source-specific data */ export interface LinkedNode extends Node { sources: string[]; // List of sources this node appears in linked: boolean; // True if node exists in multiple sources certificateStatus?: "signed" | "requested" | 
"revoked"; lastCheckIn?: string; + + // Source-specific data (keeps original IDs and URIs per source) + sourceData: Record; } /** @@ -56,89 +70,133 @@ export class NodeLinkingService { * @returns Linked nodes with source attribution */ linkNodes(nodes: Node[]): LinkedNode[] { - // First, group nodes by their identifiers - const identifierToNodes = new Map(); + // First, group nodes by their identifiers + const identifierToNodes = new Map(); - for (const node of nodes) { - const identifiers = this.extractIdentifiers(node); + for (const node of nodes) { + const identifiers = this.extractIdentifiers(node); - // Add node to all matching identifier groups - for (const identifier of identifiers) { - const group = identifierToNodes.get(identifier) ?? []; - group.push(node); - identifierToNodes.set(identifier, group); + // Add node to all matching identifier groups + for (const identifier of identifiers) { + const group = identifierToNodes.get(identifier) ?? []; + group.push(node); + identifierToNodes.set(identifier, group); + } } - } - // Now merge nodes that share any identifier - const processedNodes = new Set(); - const linkedNodes: LinkedNode[] = []; + // Now merge nodes that share any identifier + const processedNodes = new Set(); + const linkedNodes: LinkedNode[] = []; - for (const node of nodes) { - if (processedNodes.has(node)) continue; + for (const node of nodes) { + if (processedNodes.has(node)) continue; - // Find all nodes that share any identifier with this node - const identifiers = this.extractIdentifiers(node); - const relatedNodes = new Set(); - relatedNodes.add(node); - - // Collect all nodes that share any identifier - for (const identifier of identifiers) { - const group = identifierToNodes.get(identifier) ?? 
[]; - for (const relatedNode of group) { - relatedNodes.add(relatedNode); + // Find all nodes that share any identifier with this node + const identifiers = this.extractIdentifiers(node); + const relatedNodes = new Set(); + relatedNodes.add(node); + + // Collect all nodes that share any identifier + for (const identifier of identifiers) { + const group = identifierToNodes.get(identifier) ?? []; + for (const relatedNode of group) { + relatedNodes.add(relatedNode); + } } - } - // Create linked node from all related nodes - const linkedNode: LinkedNode = { - ...node, - sources: [], - linked: false, - }; + // Use the first node's name as the primary identifier + // (all related nodes should have the same name) + const primaryName = node.name; + + // Create linked node with common name + const linkedNode: LinkedNode = { + id: primaryName, // Use name as primary ID for lookups + name: primaryName, + uri: node.uri, // Will be overwritten with combined URIs + transport: node.transport, + config: node.config, + sources: [], + linked: false, + sourceData: {}, + }; + + // Collect source-specific data from all related nodes + const allUris: string[] = []; - // Merge data from all related nodes - for (const relatedNode of relatedNodes) { - processedNodes.add(relatedNode); + for (const relatedNode of relatedNodes) { + processedNodes.add(relatedNode); - const nodeSource = - (relatedNode as Node & { source?: string }).source ?? "bolt"; + const nodeSource = + (relatedNode as Node & { source?: string }).source ?? 
"bolt"; - if (!linkedNode.sources.includes(nodeSource)) { - linkedNode.sources.push(nodeSource); - } + if (!linkedNode.sources.includes(nodeSource)) { + linkedNode.sources.push(nodeSource); + } - // Merge certificate status (prefer from puppetserver) - if (nodeSource === "puppetserver") { - const nodeWithCert = relatedNode as Node & { - certificateStatus?: "signed" | "requested" | "revoked"; + // Store source-specific data + linkedNode.sourceData[nodeSource] = { + id: relatedNode.id, + uri: relatedNode.uri, + config: relatedNode.config, + metadata: (relatedNode as Node & { metadata?: Record }).metadata, + status: (relatedNode as Node & { status?: string }).status, }; - if (nodeWithCert.certificateStatus) { - linkedNode.certificateStatus = nodeWithCert.certificateStatus; + + // Collect URIs + allUris.push(relatedNode.uri); + + // Merge certificate status (prefer from puppetserver) + if (nodeSource === "puppetserver") { + const nodeWithCert = relatedNode as Node & { + certificateStatus?: "signed" | "requested" | "revoked"; + }; + if (nodeWithCert.certificateStatus) { + linkedNode.certificateStatus = nodeWithCert.certificateStatus; + } } - } - // Merge last check-in (use most recent) - const nodeWithCheckIn = relatedNode as Node & { lastCheckIn?: string }; - if (nodeWithCheckIn.lastCheckIn) { - if ( - !linkedNode.lastCheckIn || - new Date(nodeWithCheckIn.lastCheckIn) > - new Date(linkedNode.lastCheckIn) - ) { - linkedNode.lastCheckIn = nodeWithCheckIn.lastCheckIn; + // Merge last check-in (use most recent) + const nodeWithCheckIn = relatedNode as Node & { lastCheckIn?: string }; + if (nodeWithCheckIn.lastCheckIn) { + if ( + !linkedNode.lastCheckIn || + new Date(nodeWithCheckIn.lastCheckIn) > + new Date(linkedNode.lastCheckIn) + ) { + linkedNode.lastCheckIn = nodeWithCheckIn.lastCheckIn; + } } } - } - // Mark as linked if from multiple sources - linkedNode.linked = linkedNode.sources.length > 1; + // Keep uri as the primary URI from the first non-empty source. 
+ // Source-specific URIs are preserved in sourceData[source].uri. + const primaryUri = allUris.find((u) => u) ?? linkedNode.uri; + linkedNode.uri = primaryUri; - linkedNodes.push(linkedNode); - } + // Mark as linked if from multiple sources + linkedNode.linked = linkedNode.sources.length > 1; - return linkedNodes; - } + // Set source (singular) to the primary source for backward compatibility + // This ensures code that reads node.source still works correctly + linkedNode.source = linkedNode.sources[0]; + + this.logger.debug("Created linked node", { + component: "NodeLinkingService", + operation: "linkNodes", + metadata: { + nodeId: linkedNode.id, + nodeName: linkedNode.name, + sources: linkedNode.sources, + linked: linkedNode.linked, + sourceDataKeys: Object.keys(linkedNode.sourceData), + }, + }); + + linkedNodes.push(linkedNode); + } + + return linkedNodes; + } /** * Get all data for a linked node from all sources @@ -275,18 +333,21 @@ export class NodeLinkingService { private extractIdentifiers(node: Node): string[] { const identifiers: string[] = []; - // Add node ID + // Add node ID (always unique per source) if (node.id) { identifiers.push(node.id.toLowerCase()); } - // Add node name (certname) - if (node.name) { + // Add node name (certname) - used for cross-source linking + // Skip empty names to prevent incorrect linking + if (node.name && node.name.trim() !== "") { identifiers.push(node.name.toLowerCase()); } // Add URI hostname (extract from URI) - if (node.uri) { + // Skip Proxmox URIs as they use format proxmox://node/vmid where 'node' is not unique per VM + // Skip AWS URIs as they use format aws:region:instance-id where splitting on ':' yields 'aws' for all nodes + if (node.uri && !node.uri.startsWith("proxmox://") && !node.uri.startsWith("aws:")) { try { // Extract hostname from URI // URIs can be in formats like: diff --git a/backend/src/integrations/aws/AWSPlugin.ts b/backend/src/integrations/aws/AWSPlugin.ts new file mode 100644 index 
00000000..6a56db5f --- /dev/null +++ b/backend/src/integrations/aws/AWSPlugin.ts @@ -0,0 +1,651 @@ +/** + * AWS Integration Plugin + * + * Plugin class that integrates AWS EC2 into Pabawi. + * Implements both InformationSourcePlugin and ExecutionToolPlugin interfaces. + * + * Validates: Requirements 8.1, 8.2, 8.3, 8.4 + */ + +import { BasePlugin } from "../BasePlugin"; +import type { + HealthStatus, + InformationSourcePlugin, + ExecutionToolPlugin, + NodeGroup, + Capability, + Action, +} from "../types"; +import type { Node, Facts, ExecutionResult } from "../bolt/types"; +import type { LoggerService } from "../../services/LoggerService"; +import type { PerformanceMonitorService } from "../../services/PerformanceMonitorService"; +import type { JournalService } from "../../services/journal/JournalService"; +import type { CreateJournalEntry } from "../../services/journal/types"; +import type { + AWSConfig, + InstanceTypeInfo, + AMIInfo, + AMIFilter, + VPCInfo, + SubnetInfo, + SecurityGroupInfo, + KeyPairInfo, + ProvisioningCapability, +} from "./types"; +import { AWSService } from "./AWSService"; +import { AWSAuthenticationError } from "./types"; + +/** + * AWSPlugin - Plugin for AWS EC2 + * + * Provides: + * - Inventory discovery of EC2 instances + * - Group management (by region, VPC, tags) + * - Facts retrieval for instances + * - Lifecycle actions (start, stop, reboot, terminate) + * - Provisioning capabilities (launch/terminate instances) + * - Resource discovery (regions, instance types, AMIs, VPCs, subnets, security groups, key pairs) + * + * Validates: Requirements 8.1, 8.2, 8.3, 8.4, 9.1-9.4, 10.1-10.4, 11.1-11.4, 12.1-12.3, 13.1-13.7 + */ +export class AWSPlugin + extends BasePlugin + implements InformationSourcePlugin, ExecutionToolPlugin +{ + readonly type = "both" as const; + private service?: AWSService; + private journalService?: JournalService; + + /** + * Create a new AWSPlugin instance + * + * @param logger - Logger service instance (optional) + * 
@param performanceMonitor - Performance monitor service instance (optional) + * @param journalService - Journal service instance for recording events (optional) + */ + constructor( + logger?: LoggerService, + performanceMonitor?: PerformanceMonitorService, + journalService?: JournalService + ) { + super("aws", "both", logger, performanceMonitor); + this.journalService = journalService; + + this.logger.debug("AWSPlugin created", { + component: "AWSPlugin", + operation: "constructor", + }); + } + + /** + * Perform plugin-specific initialization + * + * Validates AWS configuration and initializes AWS SDK clients. + * + * @throws Error if configuration is invalid + */ + protected async performInitialization(): Promise { + this.logger.info("Initializing AWS integration", { + component: "AWSPlugin", + operation: "performInitialization", + }); + + const config = this.config.config as unknown as AWSConfig; + this.validateAWSConfig(config); + + // Create AWSService instance wrapping the EC2 client + this.service = new AWSService(config, this.logger); + + this.logger.info("AWS integration initialized successfully", { + component: "AWSPlugin", + operation: "performInitialization", + }); + } + + /** + * Validate AWS configuration + * + * @param config - AWS configuration to validate + * @throws Error if configuration is invalid + */ + private validateAWSConfig(config: AWSConfig): void { + this.logger.debug("Validating AWS configuration", { + component: "AWSPlugin", + operation: "validateAWSConfig", + }); + + // If explicit accessKeyId is provided, secretAccessKey must also be present + if (config.accessKeyId && !config.secretAccessKey) { + throw new Error( + "AWS configuration with accessKeyId must also include secretAccessKey" + ); + } + + // If no explicit credentials or profile, the AWS SDK will use the default + // credential chain (env vars, ~/.aws/credentials, instance profile, etc.) 
+ if (!config.accessKeyId && !config.profile) { + this.logger.info("No explicit AWS credentials or profile configured — using default credential chain", { + component: "AWSPlugin", + operation: "validateAWSConfig", + }); + } + + this.logger.debug("AWS configuration validated successfully", { + component: "AWSPlugin", + operation: "validateAWSConfig", + }); + } + + /** + * Perform plugin-specific health check + * + * Uses STS GetCallerIdentity to validate credentials. + * + * Validates: Requirements 12.1, 12.2, 12.3 + * + * @returns Health status (without lastCheck timestamp) + */ + protected async performHealthCheck(): Promise< + Omit + > { + if (!this.service) { + return { + healthy: false, + message: "AWS service not initialized", + }; + } + + try { + const identity = await this.service.validateCredentials(); + const config = this.config.config as unknown as AWSConfig; + + return { + healthy: true, + message: `AWS authenticated as ${identity.arn}`, + details: { + account: identity.account, + arn: identity.arn, + userId: identity.userId, + region: config.region ?? 'us-east-1', + regions: config.regions, + hasAccessKey: !!config.accessKeyId, + hasProfile: !!config.profile, + hasEndpoint: !!config.endpoint, + }, + }; + } catch (error) { + if (error instanceof AWSAuthenticationError) { + const config = this.config.config as unknown as AWSConfig; + return { + healthy: false, + message: "AWS authentication failed", + details: { + region: config.region ?? 'us-east-1', + regions: config.regions, + hasAccessKey: !!config.accessKeyId, + hasProfile: !!config.profile, + hasEndpoint: !!config.endpoint, + }, + }; + } + + const config = this.config.config as unknown as AWSConfig; + return { + healthy: false, + message: error instanceof Error ? error.message : "AWS health check failed", + details: { + region: config.region ?? 
'us-east-1', + regions: config.regions, + hasAccessKey: !!config.accessKeyId, + hasProfile: !!config.profile, + hasEndpoint: !!config.endpoint, + }, + }; + } + } + + // ======================================== + // InformationSourcePlugin Interface Methods + // ======================================== + + /** + * Get inventory of all EC2 instances + * + * Validates: Requirements 9.1, 9.4 + * + * @returns Array of Node objects representing EC2 instances + */ + async getInventory(): Promise { + this.ensureInitialized(); + return this.service!.getInventory(); + } + + /** + * Get groups of EC2 instances (by region, VPC, tags) + * + * Validates: Requirement 9.2 + * + * @returns Array of NodeGroup objects + */ + async getGroups(): Promise { + this.ensureInitialized(); + return this.service!.getGroups(); + } + + /** + * Get detailed facts for a specific EC2 instance + * + * Validates: Requirement 9.3 + * + * @param nodeId - Node identifier (e.g., aws:us-east-1:i-abc123) + * @returns Facts object with instance metadata + */ + async getNodeFacts(nodeId: string): Promise { + this.ensureInitialized(); + return this.service!.getNodeFacts(nodeId); + } + + /** + * Get arbitrary data for a node + * + * @param _nodeId - Node identifier + * @param _dataType - Type of data to retrieve + * @returns null (no additional data types supported yet) + */ + async getNodeData(_nodeId: string, _dataType: string): Promise { + this.ensureInitialized(); + return null; + } + + // ======================================== + // ExecutionToolPlugin Interface Methods + // ======================================== + + /** + * Execute an action (provisioning or lifecycle) on EC2 + * + * Routes based on action.action: + * - "provision" / "create_instance" → provisionInstance + * - "start" / "stop" / "reboot" / "terminate" → corresponding lifecycle method + * + * Records a journal entry on every completion (success or failure). + * Throws AWSAuthenticationError on invalid/expired credentials. 
+ * + * Validates: Requirements 10.1-10.4, 11.1-11.4 + * + * @param action - Action to execute + * @returns ExecutionResult with success/error details + */ + async executeAction(action: Action): Promise { + this.ensureInitialized(); + + const startedAt = new Date().toISOString(); + const target = Array.isArray(action.target) ? action.target[0] : action.target; + + try { + let result: ExecutionResult; + + switch (action.action) { + case "provision": + case "create_instance": + result = await this.handleProvision(action, startedAt, target); + break; + case "start": + case "stop": + case "reboot": + case "terminate": + result = await this.handleLifecycle(action, startedAt, target); + break; + default: + throw new Error(`Unsupported AWS action: ${action.action}`); + } + + await this.recordJournal(action, target, result); + return result; + } catch (error) { + // Re-throw AWSAuthenticationError directly (Req 11.3) + if (error instanceof AWSAuthenticationError) { + await this.recordJournalFailure(action, target, startedAt, error.message); + throw error; + } + + const errorMessage = error instanceof Error ? error.message : String(error); + const failedResult = this.buildFailedResult(action, startedAt, target, errorMessage); + await this.recordJournal(action, target, failedResult); + return failedResult; + } + } + + /** + * Handle provisioning actions (create_instance / provision) + */ + private async handleProvision( + action: Action, + startedAt: string, + target: string + ): Promise { + const params = action.parameters ?? (action.metadata as Record) ?? 
{}; + const instanceId = await this.service!.provisionInstance(params); + const completedAt = new Date().toISOString(); + + return { + id: `aws-provision-${Date.now()}`, + type: "task", + targetNodes: [target], + action: action.action, + parameters: params, + status: "success", + startedAt, + completedAt, + results: [ + { + nodeId: instanceId, + status: "success", + output: { stdout: `Instance ${instanceId} launched successfully` }, + duration: new Date(completedAt).getTime() - new Date(startedAt).getTime(), + }, + ], + }; + } + + /** + * Handle lifecycle actions (start, stop, reboot, terminate) + */ + private async handleLifecycle( + action: Action, + startedAt: string, + target: string + ): Promise { + const { instanceId, region } = this.parseTarget(target, action); + + switch (action.action) { + case "start": + await this.service!.startInstance(instanceId, region); + break; + case "stop": + await this.service!.stopInstance(instanceId, region); + break; + case "reboot": + await this.service!.rebootInstance(instanceId, region); + break; + case "terminate": + await this.service!.terminateInstance(instanceId, region); + break; + } + + const completedAt = new Date().toISOString(); + + return { + id: `aws-${action.action}-${Date.now()}`, + type: "command", + targetNodes: [target], + action: action.action, + status: "success", + startedAt, + completedAt, + results: [ + { + nodeId: target, + status: "success", + output: { stdout: `Action ${action.action} completed on ${instanceId}` }, + duration: new Date(completedAt).getTime() - new Date(startedAt).getTime(), + }, + ], + }; + } + + /** + * Parse target string to extract instanceId and optional region. + * Supports "aws:region:instanceId" format or plain instance IDs. 
+ */ + private parseTarget( + target: string, + action: Action + ): { instanceId: string; region?: string } { + const parts = target.split(":"); + if (parts.length >= 3 && parts[0] === "aws") { + return { region: parts[1], instanceId: parts.slice(2).join(":") }; + } + // Fall back to metadata or treat target as raw instance ID + const region = action.metadata?.region as string | undefined; + return { instanceId: target, region }; + } + + /** + * Build a failed ExecutionResult + */ + private buildFailedResult( + action: Action, + startedAt: string, + target: string, + errorMessage: string + ): ExecutionResult { + return { + id: `aws-error-${Date.now()}`, + type: "command", + targetNodes: [target], + action: action.action, + status: "failed", + startedAt, + completedAt: new Date().toISOString(), + results: [ + { + nodeId: target, + status: "failed", + error: errorMessage, + duration: 0, + }, + ], + error: errorMessage, + }; + } + + /** + * Record a journal entry for a completed action (success or failure). + * Validates: Requirements 10.4, 11.4 + */ + private async recordJournal( + action: Action, + target: string, + result: ExecutionResult + ): Promise { + if (!this.journalService) return; + + const eventType = this.mapActionToEventType(action.action); + const entry: CreateJournalEntry = { + nodeId: target, + nodeUri: `aws:${target}`, + eventType, + source: "aws", + action: action.action, + summary: + result.status === "success" + ? `AWS ${action.action} succeeded on ${target}` + : `AWS ${action.action} failed on ${target}: ${result.error ?? "unknown error"}`, + details: { + status: result.status, + parameters: action.parameters, + ...(result.error ? { error: result.error } : {}), + }, + }; + + try { + await this.journalService.recordEvent(entry); + } catch (err) { + this.logger.error("Failed to record journal entry", { + component: "AWSPlugin", + operation: "recordJournal", + metadata: { error: err instanceof Error ? 
err.message : String(err) }, + }); + } + } + + /** + * Record a journal entry for a failure that throws (e.g., auth errors). + */ + private async recordJournalFailure( + action: Action, + target: string, + startedAt: string, + errorMessage: string + ): Promise { + const failedResult = this.buildFailedResult(action, startedAt, target, errorMessage); + await this.recordJournal(action, target, failedResult); + } + + /** + * Map an action name to a JournalEventType + */ + private mapActionToEventType( + actionName: string + ): "provision" | "start" | "stop" | "reboot" | "destroy" | "info" { + switch (actionName) { + case "provision": + case "create_instance": + return "provision"; + case "start": + return "start"; + case "stop": + return "stop"; + case "reboot": + return "reboot"; + case "terminate": + return "destroy"; + default: + return "info"; + } + } + + /** + * Set the JournalService (alternative to constructor injection) + */ + setJournalService(journalService: JournalService): void { + this.journalService = journalService; + } + + /** + * List lifecycle action capabilities + * + * @returns Array of Capability objects + */ + listCapabilities(): Capability[] { + return [ + { name: "start", description: "Start an EC2 instance" }, + { name: "stop", description: "Stop an EC2 instance" }, + { name: "reboot", description: "Reboot an EC2 instance" }, + { name: "terminate", description: "Terminate an EC2 instance" }, + ]; + } + + /** + * List provisioning capabilities + * + * @returns Array of ProvisioningCapability objects + */ + listProvisioningCapabilities(): ProvisioningCapability[] { + return [ + { + name: "create_instance", + description: "Launch a new EC2 instance", + operation: "create", + }, + { + name: "terminate_instance", + description: "Terminate an EC2 instance", + operation: "destroy", + }, + ]; + } + + // ======================================== + // AWS-Specific Resource Discovery Methods + // ======================================== + + /** + * Get 
available AWS regions + * + * Validates: Requirement 13.1 + */ + async getRegions(): Promise { + this.ensureInitialized(); + return this.service!.getRegions(); + } + + /** + * Get available EC2 instance types + * + * Validates: Requirement 13.2 + */ + async getInstanceTypes(_region?: string): Promise { + this.ensureInitialized(); + return this.service!.getInstanceTypes(_region); + } + + /** + * Get available AMIs for a region + * + * Validates: Requirement 13.3 + */ + async getAMIs(_region: string, _filters?: AMIFilter[]): Promise { + this.ensureInitialized(); + return this.service!.getAMIs(_region, _filters); + } + + /** + * Get available VPCs for a region + * + * Validates: Requirement 13.4 + */ + async getVPCs(_region: string): Promise { + this.ensureInitialized(); + return this.service!.getVPCs(_region); + } + + /** + * Get available subnets for a region + * + * Validates: Requirement 13.5 + */ + async getSubnets(_region: string, _vpcId?: string): Promise { + this.ensureInitialized(); + return this.service!.getSubnets(_region, _vpcId); + } + + /** + * Get available security groups for a region + * + * Validates: Requirement 13.6 + */ + async getSecurityGroups( + _region: string, + _vpcId?: string + ): Promise { + this.ensureInitialized(); + return this.service!.getSecurityGroups(_region, _vpcId); + } + + /** + * Get available key pairs for a region + * + * Validates: Requirement 13.7 + */ + async getKeyPairs(_region: string): Promise { + this.ensureInitialized(); + return this.service!.getKeyPairs(_region); + } + + // ======================================== + // Helper Methods + // ======================================== + + /** + * Ensure the plugin is initialized + * + * @throws Error if plugin is not initialized + */ + private ensureInitialized(): void { + if (!this.initialized || !this.config.enabled) { + throw new Error("AWS integration is not initialized"); + } + } +} diff --git a/backend/src/integrations/aws/AWSService.ts 
b/backend/src/integrations/aws/AWSService.ts new file mode 100644 index 00000000..dccb22d1 --- /dev/null +++ b/backend/src/integrations/aws/AWSService.ts @@ -0,0 +1,983 @@ +/** + * AWS Service + * + * Wraps the AWS SDK EC2 client to provide inventory discovery, + * grouping, facts retrieval, and resource discovery for EC2 instances. + * + * Validates: Requirements 9.1, 9.2, 9.3, 9.4, 13.1-13.7 + */ + +import { + EC2Client, + DescribeInstancesCommand, + DescribeRegionsCommand, + DescribeInstanceTypesCommand, + DescribeImagesCommand, + DescribeVpcsCommand, + DescribeSubnetsCommand, + DescribeSecurityGroupsCommand, + DescribeKeyPairsCommand, + RunInstancesCommand, + StartInstancesCommand, + StopInstancesCommand, + RebootInstancesCommand, + TerminateInstancesCommand, + type Instance, + type Tag, + type Filter, + type RunInstancesCommandInput, +} from "@aws-sdk/client-ec2"; +import { STSClient, GetCallerIdentityCommand } from "@aws-sdk/client-sts"; +import { AWSAuthenticationError } from "./types"; +import type { Node, Facts } from "../bolt/types"; +import type { NodeGroup } from "../types"; +import type { LoggerService } from "../../services/LoggerService"; +import type { + AWSConfig, + InstanceTypeInfo, + AMIInfo, + AMIFilter, + VPCInfo, + SubnetInfo, + SecurityGroupInfo, + KeyPairInfo, +} from "./types"; + +/** + * Extract a tag value from an array of AWS tags + */ +function getTagValue(tags: Tag[] | undefined, key: string): string | undefined { + return tags?.find((t) => t.Key === key)?.Value; +} + +/** + * Convert AWS tags array to a plain Record + */ +function tagsToRecord(tags: Tag[] | undefined): Record { + const result: Record = {}; + if (tags) { + for (const tag of tags) { + if (tag.Key && tag.Value !== undefined) { + result[tag.Key] = tag.Value; + } + } + } + return result; +} + +/** + * AWSService - Wraps AWS SDK EC2 client for inventory and resource discovery + */ +export class AWSService { + private client: EC2Client; + private readonly clientConfig: 
Record; + private readonly region: string; + private readonly regions: string[]; + private readonly logger: LoggerService; + + constructor(config: AWSConfig, logger: LoggerService) { + this.logger = logger; + this.region = config.region || "us-east-1"; + this.regions = config.regions && config.regions.length > 0 + ? config.regions + : [this.region]; + + const clientConfig: Record = { + region: this.region, + }; + + if (config.endpoint) { + clientConfig.endpoint = config.endpoint; + } + + if (config.accessKeyId && config.secretAccessKey) { + clientConfig.credentials = { + accessKeyId: config.accessKeyId, + secretAccessKey: config.secretAccessKey, + ...(config.sessionToken ? { sessionToken: config.sessionToken } : {}), + }; + } + + this.clientConfig = clientConfig; + this.client = new EC2Client(clientConfig); + + this.logger.debug("AWSService created", { + component: "AWSService", + operation: "constructor", + metadata: { region: this.region, regions: this.regions }, + }); + } + + /** + * Create an EC2Client for a specific region (used by resource discovery methods) + */ + private getClientForRegion(region: string): EC2Client { + if (region === this.region) { + return this.client; + } + // Build a new client with the same credentials but different region + return new EC2Client({ + ...this.clientConfig, + region, + }); + } + + // ======================================== + // Credential Validation + // ======================================== + + /** + * Validate AWS credentials using STS GetCallerIdentity + * + * Validates: Requirements 12.1, 12.2 + * + * @returns Account details (account, arn, userId) on success + * @throws AWSAuthenticationError on invalid credentials + */ + async validateCredentials(): Promise<{ account: string; arn: string; userId: string }> { + this.logger.debug("Validating AWS credentials via STS", { + component: "AWSService", + operation: "validateCredentials", + }); + + const stsClient = new STSClient(this.clientConfig); + + try { + const 
response = await stsClient.send(new GetCallerIdentityCommand({})); + + const result = { + account: response.Account || "", + arn: response.Arn || "", + userId: response.UserId || "", + }; + + this.logger.info("AWS credentials validated", { + component: "AWSService", + operation: "validateCredentials", + metadata: { account: result.account }, + }); + + return result; + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + this.logger.error("AWS credential validation failed", { + component: "AWSService", + operation: "validateCredentials", + metadata: { error: message }, + }); + throw new AWSAuthenticationError(message); + } finally { + stsClient.destroy(); + } + } + + // ======================================== + // Inventory & Grouping + // ======================================== + + /** + * List all EC2 instances as Node objects + * + * Validates: Requirements 9.1, 9.4 + */ + async getInventory(): Promise { + this.logger.debug("Fetching EC2 inventory", { + component: "AWSService", + operation: "getInventory", + metadata: { regions: this.regions }, + }); + + // Query all configured regions in parallel + const regionResults = await Promise.all( + this.regions.map(async (region) => { + try { + const instances = await this.describeAllInstancesInRegion(region); + this.logger.debug(`Region ${region} returned ${String(instances.length)} instances`, { + component: "AWSService", + operation: "getInventory", + metadata: { region, count: instances.length }, + }); + return instances.map((instance) => this.transformInstanceToNode(instance, region)); + } catch (error) { + this.logger.error(`Failed to fetch inventory from region ${region}`, { + component: "AWSService", + operation: "getInventory", + metadata: { region }, + }, error instanceof Error ? 
error : undefined); + return []; + } + }) + ); + + const nodes = regionResults.flat(); + + this.logger.info("EC2 inventory fetched", { + component: "AWSService", + operation: "getInventory", + metadata: { count: nodes.length, regions: this.regions }, + }); + + return nodes; + } + + /** + * Group instances by region, VPC, and tags + * + * Validates: Requirement 9.2 + */ + async getGroups(): Promise { + this.logger.debug("Building EC2 groups", { + component: "AWSService", + operation: "getGroups", + }); + + const inventory = await this.getInventory(); + const groups: NodeGroup[] = []; + + groups.push(...this.groupByRegion(inventory)); + groups.push(...this.groupByVPC(inventory)); + groups.push(...this.groupByTags(inventory)); + + this.logger.info("EC2 groups built", { + component: "AWSService", + operation: "getGroups", + metadata: { groupCount: groups.length }, + }); + + return groups; + } + + /** + * Get detailed facts for a specific EC2 instance + * + * Validates: Requirement 9.3 + * + * @param nodeId - Node URI (e.g., "aws:us-east-1:i-abc123") + */ + async getNodeFacts(nodeId: string): Promise { + this.logger.debug("Fetching node facts", { + component: "AWSService", + operation: "getNodeFacts", + metadata: { nodeId }, + }); + + const { region, instanceId } = this.parseNodeId(nodeId); + const client = this.getClientForRegion(region); + + const response = await client.send( + new DescribeInstancesCommand({ + InstanceIds: [instanceId], + }) + ); + + const instance = response.Reservations?.[0]?.Instances?.[0]; + if (!instance) { + throw new Error(`Instance not found: ${instanceId}`); + } + + return this.transformToFacts(nodeId, instance); + } + + // ======================================== + // Resource Discovery + // ======================================== + + /** + * Get available AWS regions + * + * Validates: Requirement 13.1 + */ + async getRegions(): Promise { + this.logger.debug("Fetching AWS regions", { + component: "AWSService", + operation: "getRegions", + 
}); + + const response = await this.client.send(new DescribeRegionsCommand({})); + const regions = (response.Regions || []) + .map((r) => r.RegionName) + .filter((name): name is string => !!name) + .sort(); + + this.logger.info("AWS regions fetched", { + component: "AWSService", + operation: "getRegions", + metadata: { count: regions.length }, + }); + + return regions; + } + + /** + * Get available EC2 instance types + * + * Validates: Requirement 13.2 + */ + async getInstanceTypes(region?: string): Promise { + const client = region ? this.getClientForRegion(region) : this.client; + + this.logger.debug("Fetching instance types", { + component: "AWSService", + operation: "getInstanceTypes", + metadata: { region: region || this.region }, + }); + + const results: InstanceTypeInfo[] = []; + let nextToken: string | undefined; + + do { + const response = await client.send( + new DescribeInstanceTypesCommand({ + NextToken: nextToken, + MaxResults: 100, + }) + ); + + for (const it of response.InstanceTypes || []) { + results.push({ + instanceType: it.InstanceType || "unknown", + vCpus: it.VCpuInfo?.DefaultVCpus || 0, + memoryMiB: it.MemoryInfo?.SizeInMiB || 0, + architecture: it.ProcessorInfo?.SupportedArchitectures?.[0] || "unknown", + currentGeneration: it.CurrentGeneration ?? 
false, + }); + } + + nextToken = response.NextToken; + } while (nextToken); + + this.logger.info("Instance types fetched", { + component: "AWSService", + operation: "getInstanceTypes", + metadata: { count: results.length }, + }); + + return results; + } + + /** + * Get available AMIs for a region + * + * Validates: Requirement 13.3 + */ + async getAMIs(region: string, filters?: AMIFilter[]): Promise { + const client = this.getClientForRegion(region); + + this.logger.debug("Fetching AMIs", { + component: "AWSService", + operation: "getAMIs", + metadata: { region }, + }); + + const ec2Filters: Filter[] = (filters || []).map((f) => ({ + Name: f.name, + Values: f.values, + })); + + // Default: only show available images owned by self or Amazon + if (ec2Filters.length === 0) { + ec2Filters.push({ Name: "state", Values: ["available"] }); + } + + const response = await client.send( + new DescribeImagesCommand({ + Filters: ec2Filters, + Owners: ["self", "amazon"], + MaxResults: 200, + }) + ); + + const amis: AMIInfo[] = (response.Images || []).map((img) => ({ + imageId: img.ImageId || "", + name: img.Name || "", + description: img.Description, + architecture: img.Architecture || "unknown", + ownerId: img.OwnerId || "", + state: img.State || "unknown", + platform: img.PlatformDetails, + creationDate: img.CreationDate, + })); + + this.logger.info("AMIs fetched", { + component: "AWSService", + operation: "getAMIs", + metadata: { region, count: amis.length }, + }); + + return amis; + } + + /** + * Get available VPCs for a region + * + * Validates: Requirement 13.4 + */ + async getVPCs(region: string): Promise { + const client = this.getClientForRegion(region); + + this.logger.debug("Fetching VPCs", { + component: "AWSService", + operation: "getVPCs", + metadata: { region }, + }); + + const response = await client.send(new DescribeVpcsCommand({})); + + const vpcs: VPCInfo[] = (response.Vpcs || []).map((vpc) => ({ + vpcId: vpc.VpcId || "", + cidrBlock: vpc.CidrBlock || "", + 
state: vpc.State || "unknown", + isDefault: vpc.IsDefault ?? false, + tags: tagsToRecord(vpc.Tags), + })); + + this.logger.info("VPCs fetched", { + component: "AWSService", + operation: "getVPCs", + metadata: { region, count: vpcs.length }, + }); + + return vpcs; + } + + /** + * Get available subnets for a region + * + * Validates: Requirement 13.5 + */ + async getSubnets(region: string, vpcId?: string): Promise { + const client = this.getClientForRegion(region); + + this.logger.debug("Fetching subnets", { + component: "AWSService", + operation: "getSubnets", + metadata: { region, vpcId }, + }); + + const filters: Filter[] = []; + if (vpcId) { + filters.push({ Name: "vpc-id", Values: [vpcId] }); + } + + const response = await client.send( + new DescribeSubnetsCommand({ + Filters: filters.length > 0 ? filters : undefined, + }) + ); + + const subnets: SubnetInfo[] = (response.Subnets || []).map((s) => ({ + subnetId: s.SubnetId || "", + vpcId: s.VpcId || "", + cidrBlock: s.CidrBlock || "", + availabilityZone: s.AvailabilityZone || "", + availableIpAddressCount: s.AvailableIpAddressCount || 0, + tags: tagsToRecord(s.Tags), + })); + + this.logger.info("Subnets fetched", { + component: "AWSService", + operation: "getSubnets", + metadata: { region, count: subnets.length }, + }); + + return subnets; + } + + /** + * Get available security groups for a region + * + * Validates: Requirement 13.6 + */ + async getSecurityGroups(region: string, vpcId?: string): Promise { + const client = this.getClientForRegion(region); + + this.logger.debug("Fetching security groups", { + component: "AWSService", + operation: "getSecurityGroups", + metadata: { region, vpcId }, + }); + + const filters: Filter[] = []; + if (vpcId) { + filters.push({ Name: "vpc-id", Values: [vpcId] }); + } + + const response = await client.send( + new DescribeSecurityGroupsCommand({ + Filters: filters.length > 0 ? 
filters : undefined, + }) + ); + + const groups: SecurityGroupInfo[] = (response.SecurityGroups || []).map((sg) => ({ + groupId: sg.GroupId || "", + groupName: sg.GroupName || "", + description: sg.Description || "", + vpcId: sg.VpcId || "", + tags: tagsToRecord(sg.Tags), + })); + + this.logger.info("Security groups fetched", { + component: "AWSService", + operation: "getSecurityGroups", + metadata: { region, count: groups.length }, + }); + + return groups; + } + + /** + * Get available key pairs for a region + * + * Validates: Requirement 13.7 + */ + async getKeyPairs(region: string): Promise { + const client = this.getClientForRegion(region); + + this.logger.debug("Fetching key pairs", { + component: "AWSService", + operation: "getKeyPairs", + metadata: { region }, + }); + + const response = await client.send(new DescribeKeyPairsCommand({})); + + const keyPairs: KeyPairInfo[] = (response.KeyPairs || []).map((kp) => ({ + keyName: kp.KeyName || "", + keyPairId: kp.KeyPairId || "", + keyFingerprint: kp.KeyFingerprint || "", + keyType: kp.KeyType, + })); + + this.logger.info("Key pairs fetched", { + component: "AWSService", + operation: "getKeyPairs", + metadata: { region, count: keyPairs.length }, + }); + + return keyPairs; + } + + // ======================================== + // Provisioning & Lifecycle + // ======================================== + + /** + * Provision a new EC2 instance via RunInstances. 
+ * + * Validates: Requirement 10.1, 10.2, 10.3 + * + * @returns The new instance ID + */ + async provisionInstance(params: Record): Promise { + this.logger.info("Provisioning EC2 instance", { + component: "AWSService", + operation: "provisionInstance", + metadata: { imageId: params.imageId, instanceType: params.instanceType }, + }); + + const region = (params.region as string) || this.region; + const client = this.getClientForRegion(region); + + try { + const response = await client.send( + new RunInstancesCommand({ + ImageId: params.imageId as string, + InstanceType: ((params.instanceType as string) || "t2.micro") as RunInstancesCommandInput["InstanceType"], + MinCount: 1, + MaxCount: 1, + KeyName: params.keyName as string | undefined, + SecurityGroupIds: params.securityGroupIds as string[] | undefined, + SubnetId: params.subnetId as string | undefined, + TagSpecifications: params.name + ? [ + { + ResourceType: "instance", + Tags: [{ Key: "Name", Value: params.name as string }], + }, + ] + : undefined, + }) + ); + + const instanceId = response.Instances?.[0]?.InstanceId; + if (!instanceId) { + throw new Error("RunInstances returned no instance ID"); + } + + this.logger.info("EC2 instance provisioned", { + component: "AWSService", + operation: "provisionInstance", + metadata: { instanceId, region }, + }); + + return instanceId; + } catch (error) { + this.throwIfAuthError(error); + throw error; + } + } + + /** + * Start an EC2 instance. + * Validates: Requirement 11.1 + */ + async startInstance(instanceId: string, region?: string): Promise { + const client = this.getClientForRegion(region || this.region); + try { + await client.send(new StartInstancesCommand({ InstanceIds: [instanceId] })); + this.logger.info("EC2 instance started", { + component: "AWSService", + operation: "startInstance", + metadata: { instanceId }, + }); + } catch (error) { + this.throwIfAuthError(error); + throw error; + } + } + + /** + * Stop an EC2 instance. 
+ * Validates: Requirement 11.1 + */ + async stopInstance(instanceId: string, region?: string): Promise { + const client = this.getClientForRegion(region || this.region); + try { + await client.send(new StopInstancesCommand({ InstanceIds: [instanceId] })); + this.logger.info("EC2 instance stopped", { + component: "AWSService", + operation: "stopInstance", + metadata: { instanceId }, + }); + } catch (error) { + this.throwIfAuthError(error); + throw error; + } + } + + /** + * Reboot an EC2 instance. + * Validates: Requirement 11.1 + */ + async rebootInstance(instanceId: string, region?: string): Promise { + const client = this.getClientForRegion(region || this.region); + try { + await client.send(new RebootInstancesCommand({ InstanceIds: [instanceId] })); + this.logger.info("EC2 instance rebooted", { + component: "AWSService", + operation: "rebootInstance", + metadata: { instanceId }, + }); + } catch (error) { + this.throwIfAuthError(error); + throw error; + } + } + + /** + * Terminate an EC2 instance. + * Validates: Requirement 11.1 + */ + async terminateInstance(instanceId: string, region?: string): Promise { + const client = this.getClientForRegion(region || this.region); + try { + await client.send(new TerminateInstancesCommand({ InstanceIds: [instanceId] })); + this.logger.info("EC2 instance terminated", { + component: "AWSService", + operation: "terminateInstance", + metadata: { instanceId }, + }); + } catch (error) { + this.throwIfAuthError(error); + throw error; + } + } + + /** + * Check if an AWS SDK error is an authentication/credentials error + * and throw AWSAuthenticationError if so. + */ + private throwIfAuthError(error: unknown): void { + if (error instanceof Error) { + const name = (error as Error & { name?: string }).name ?? ""; + const code = (error as Error & { Code?: string }).Code ?? 
""; + const authErrors = [ + "AuthFailure", + "UnauthorizedAccess", + "InvalidClientTokenId", + "SignatureDoesNotMatch", + "ExpiredToken", + "ExpiredTokenException", + "AccessDeniedException", + "CredentialsError", + ]; + if (authErrors.includes(name) || authErrors.includes(code)) { + throw new AWSAuthenticationError(error.message); + } + } + } + + // ======================================== + // Private Helpers + // ======================================== + + /** + * Describe all EC2 instances using pagination + */ + private async describeAllInstances(): Promise { + return this.describeAllInstancesInRegion(this.region); + } + + /** + * Describe all EC2 instances in a specific region using pagination + */ + private async describeAllInstancesInRegion(region: string): Promise { + const client = this.getClientForRegion(region); + const instances: Instance[] = []; + let nextToken: string | undefined; + + do { + const response = await client.send( + new DescribeInstancesCommand({ NextToken: nextToken }) + ); + + for (const reservation of response.Reservations || []) { + for (const instance of reservation.Instances || []) { + instances.push(instance); + } + } + + nextToken = response.NextToken; + } while (nextToken); + + return instances; + } + + /** + * Transform an EC2 Instance into a Node object + * + * Validates: Requirement 9.4 - includes state, type, region, VPC, tags + */ + private transformInstanceToNode(instance: Instance, queryRegion?: string): Node { + const instanceId = instance.InstanceId || "unknown"; + const nameTag = getTagValue(instance.Tags, "Name"); + const tags = tagsToRecord(instance.Tags); + const state = instance.State?.Name || "unknown"; + const instanceType = instance.InstanceType || "unknown"; + const vpcId = instance.VpcId || ""; + const az = instance.Placement?.AvailabilityZone || queryRegion || this.region; + const instanceRegion = az.replace(/-[a-z]$/, ""); + + const nodeId = `aws:${instanceRegion}:${instanceId}`; + + const node: Node = { + 
id: nodeId, + name: nameTag || instanceId, + uri: `aws:${instanceRegion}:${instanceId}`, + transport: "ssh" as const, + config: { + instanceId, + state, + instanceType, + region: instanceRegion, + vpcId, + tags, + availabilityZone: az, + publicIp: instance.PublicIpAddress, + privateIp: instance.PrivateIpAddress, + }, + source: "aws", + }; + + // Attach status for UI display (same pattern as ProxmoxService) + (node as Node & { status?: string }).status = state; + + return node; + } + + /** + * Transform an EC2 Instance into a Facts object + */ + private transformToFacts(nodeId: string, instance: Instance): Facts { + const tags = tagsToRecord(instance.Tags); + const state = instance.State?.Name || "unknown"; + const instanceType = instance.InstanceType || "unknown"; + const az = instance.Placement?.AvailabilityZone || this.region; + const instanceRegion = az.replace(/-[a-z]$/, ""); + + return { + nodeId, + gatheredAt: new Date().toISOString(), + source: "aws", + facts: { + os: { + family: instance.Platform === "Windows" ? "windows" : "linux", + name: instance.PlatformDetails || "unknown", + release: { full: "unknown", major: "unknown" }, + }, + processors: { + count: 0, // Not available from describeInstances directly + models: [], + }, + memory: { + system: { total: "unknown", available: "unknown" }, + }, + networking: { + hostname: instance.PrivateDnsName || "unknown", + interfaces: { + ...(instance.PublicIpAddress + ? { public: { ip: instance.PublicIpAddress } } + : {}), + ...(instance.PrivateIpAddress + ? 
{ private: { ip: instance.PrivateIpAddress } } + : {}), + }, + }, + categories: { + system: { + instanceId: instance.InstanceId, + state, + instanceType, + region: instanceRegion, + availabilityZone: az, + launchTime: instance.LaunchTime?.toISOString(), + architecture: instance.Architecture, + platform: instance.Platform || "linux", + }, + network: { + vpcId: instance.VpcId, + subnetId: instance.SubnetId, + publicIp: instance.PublicIpAddress, + privateIp: instance.PrivateIpAddress, + publicDns: instance.PublicDnsName, + privateDns: instance.PrivateDnsName, + }, + hardware: { + instanceType, + architecture: instance.Architecture, + rootDeviceType: instance.RootDeviceType, + rootDeviceName: instance.RootDeviceName, + ebsOptimized: instance.EbsOptimized, + }, + custom: { + tags, + keyName: instance.KeyName, + imageId: instance.ImageId, + securityGroups: (instance.SecurityGroups || []).map((sg) => ({ + groupId: sg.GroupId, + groupName: sg.GroupName, + })), + }, + }, + }, + }; + } + + /** + * Parse a node ID (e.g., "aws:us-east-1:i-abc123") into region and instanceId + */ + private parseNodeId(nodeId: string): { region: string; instanceId: string } { + const parts = nodeId.split(":"); + if (parts.length < 3 || parts[0] !== "aws") { + throw new Error( + `Invalid AWS node ID format: ${nodeId}. 
Expected "aws:{region}:{instanceId}"` + ); + } + return { region: parts[1], instanceId: parts.slice(2).join(":") }; + } + + /** + * Group nodes by region + */ + private groupByRegion(nodes: Node[]): NodeGroup[] { + const regionMap = new Map(); + + for (const node of nodes) { + const region = (node.config.region as string) || this.region; + if (!regionMap.has(region)) { + regionMap.set(region, []); + } + regionMap.get(region)!.push(node.id); + } + + return Array.from(regionMap.entries()).map(([region, nodeIds]) => ({ + id: `aws:region:${region}`, + name: `AWS ${region}`, + source: "aws", + sources: ["aws"], + linked: false, + nodes: nodeIds, + metadata: { description: `EC2 instances in ${region}` }, + })); + } + + /** + * Group nodes by VPC + */ + private groupByVPC(nodes: Node[]): NodeGroup[] { + const vpcMap = new Map(); + + for (const node of nodes) { + const vpcId = (node.config.vpcId as string) || "no-vpc"; + if (!vpcMap.has(vpcId)) { + vpcMap.set(vpcId, []); + } + vpcMap.get(vpcId)!.push(node.id); + } + + return Array.from(vpcMap.entries()).map(([vpcId, nodeIds]) => ({ + id: `aws:vpc:${vpcId}`, + name: vpcId === "no-vpc" ? 
"No VPC" : `VPC ${vpcId}`, + source: "aws", + sources: ["aws"], + linked: false, + nodes: nodeIds, + metadata: { description: `EC2 instances in VPC ${vpcId}` }, + })); + } + + /** + * Group nodes by tag keys (e.g., "Environment", "Project") + */ + private groupByTags(nodes: Node[]): NodeGroup[] { + // Collect groups for well-known tag keys + const tagKeys = ["Environment", "Project", "Team", "Application", "Stack"]; + const tagGroups = new Map>(); + + for (const node of nodes) { + const tags = (node.config.tags as Record) || {}; + for (const key of tagKeys) { + const value = tags[key]; + if (value) { + if (!tagGroups.has(key)) { + tagGroups.set(key, new Map()); + } + const valueMap = tagGroups.get(key)!; + if (!valueMap.has(value)) { + valueMap.set(value, []); + } + valueMap.get(value)!.push(node.id); + } + } + } + + const groups: NodeGroup[] = []; + for (const [tagKey, valueMap] of tagGroups) { + for (const [tagValue, nodeIds] of valueMap) { + groups.push({ + id: `aws:tag:${tagKey}:${tagValue}`, + name: `${tagKey}: ${tagValue}`, + source: "aws", + sources: ["aws"], + linked: false, + nodes: nodeIds, + metadata: { + description: `EC2 instances with tag ${tagKey}=${tagValue}`, + }, + }); + } + } + + return groups; + } +} diff --git a/backend/src/integrations/aws/__tests__/AWSPlugin.healthCheck.test.ts b/backend/src/integrations/aws/__tests__/AWSPlugin.healthCheck.test.ts new file mode 100644 index 00000000..d024d451 --- /dev/null +++ b/backend/src/integrations/aws/__tests__/AWSPlugin.healthCheck.test.ts @@ -0,0 +1,216 @@ +/** + * AWSPlugin Health Check Tests + * + * Tests for performHealthCheck() using STS GetCallerIdentity. 
+ *
+ * Validates: Requirements 12.1, 12.2, 12.3
+ */
+
+import { describe, it, expect, beforeEach, vi } from "vitest";
+import { AWSPlugin } from "../AWSPlugin";
+import { AWSAuthenticationError } from "../types";
+import type { IntegrationConfig } from "../../types";
+import type { LoggerService } from "../../../services/LoggerService";
+
+// Mock AWSService: a shared spy set so tests can drive credential validation outcomes
+const mockValidateCredentials = vi.fn();
+const mockServiceInstance = {
+  validateCredentials: mockValidateCredentials,
+  getInventory: vi.fn(),
+  getGroups: vi.fn(),
+  getNodeFacts: vi.fn(),
+  getRegions: vi.fn(),
+  getInstanceTypes: vi.fn(),
+  getAMIs: vi.fn(),
+  getVPCs: vi.fn(),
+  getSubnets: vi.fn(),
+  getSecurityGroups: vi.fn(),
+  getKeyPairs: vi.fn(),
+  provisionInstance: vi.fn(),
+  startInstance: vi.fn(),
+  stopInstance: vi.fn(),
+  rebootInstance: vi.fn(),
+  terminateInstance: vi.fn(),
+};
+
+vi.mock("../AWSService", () => ({
+  AWSService: class {
+    validateCredentials = mockServiceInstance.validateCredentials;
+    getInventory = mockServiceInstance.getInventory;
+    getGroups = mockServiceInstance.getGroups;
+    getNodeFacts = mockServiceInstance.getNodeFacts;
+    getRegions = mockServiceInstance.getRegions;
+    getInstanceTypes = mockServiceInstance.getInstanceTypes;
+    getAMIs = mockServiceInstance.getAMIs;
+    getVPCs = mockServiceInstance.getVPCs;
+    getSubnets = mockServiceInstance.getSubnets;
+    getSecurityGroups = mockServiceInstance.getSecurityGroups;
+    getKeyPairs = mockServiceInstance.getKeyPairs;
+    provisionInstance = mockServiceInstance.provisionInstance;
+    startInstance = mockServiceInstance.startInstance;
+    stopInstance = mockServiceInstance.stopInstance;
+    rebootInstance = mockServiceInstance.rebootInstance;
+    terminateInstance = mockServiceInstance.terminateInstance;
+  },
+}));
+
+describe("AWSPlugin Health Check", () => {
+  let plugin: AWSPlugin;
+  let mockLogger: LoggerService;
+
+  // Well-known AWS documentation example credentials (not real secrets)
+  const validConfig: IntegrationConfig = {
+    enabled: true,
+    name: "aws",
+    type: "both",
+    config: {
+      accessKeyId: "AKIAIOSFODNN7EXAMPLE", // pragma: allowlist secret
+      secretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", // pragma: allowlist secret
+      region: "us-east-1",
+    },
+  };
+
+  beforeEach(() => {
+    vi.clearAllMocks();
+
+    mockLogger = {
+      debug: vi.fn(),
+      info: vi.fn(),
+      warn: vi.fn(),
+      error: vi.fn(),
+    } as unknown as LoggerService;
+
+    plugin = new AWSPlugin(mockLogger);
+  });
+
+  describe("healthy credentials (Req 12.1)", () => {
+    it("should return healthy with account details on valid credentials", async () => {
+      await plugin.initialize(validConfig);
+
+      mockValidateCredentials.mockResolvedValue({
+        account: "123456789012",
+        arn: "arn:aws:iam::123456789012:user/testuser",
+        userId: "AIDAEXAMPLEUSERID",
+      });
+
+      const health = await plugin.healthCheck();
+
+      expect(health.healthy).toBe(true);
+      expect(health.message).toContain("arn:aws:iam::123456789012:user/testuser");
+      expect(health.details).toEqual({
+        account: "123456789012",
+        arn: "arn:aws:iam::123456789012:user/testuser",
+        userId: "AIDAEXAMPLEUSERID",
+        region: "us-east-1",
+        regions: undefined,
+        hasAccessKey: true,
+        hasProfile: false,
+        hasEndpoint: false,
+      });
+      expect(health.lastCheck).toBeDefined();
+      expect(mockValidateCredentials).toHaveBeenCalledOnce();
+    });
+  });
+
+  describe("invalid credentials (Req 12.2)", () => {
+    it("should return unhealthy with 'AWS authentication failed' on invalid credentials", async () => {
+      await plugin.initialize(validConfig);
+
+      mockValidateCredentials.mockRejectedValue(
+        new AWSAuthenticationError("The security token included in the request is invalid")
+      );
+
+      const health = await plugin.healthCheck();
+
+      expect(health.healthy).toBe(false);
+      expect(health.message).toBe("AWS authentication failed");
+      expect(mockValidateCredentials).toHaveBeenCalledOnce();
+    });
+
+    it("should return unhealthy with error message on non-auth errors", async () => {
+      await plugin.initialize(validConfig);
+
+      mockValidateCredentials.mockRejectedValue(new Error("Network timeout"));
+
+      const health = await plugin.healthCheck();
+
+      expect(health.healthy).toBe(false);
+      expect(health.message).toBe("Network timeout");
+    });
+  });
+
+  describe("plugin continues accepting config updates when unhealthy (Req 12.3)", () => {
+    it("should allow re-initialization after unhealthy health check", async () => {
+      await plugin.initialize(validConfig);
+
+      // First: unhealthy health check driven by a rejected credential validation
+      mockValidateCredentials.mockRejectedValue(
+        new AWSAuthenticationError("Invalid credentials")
+      );
+      const unhealthy = await plugin.healthCheck();
+      expect(unhealthy.healthy).toBe(false);
+
+      // Plugin should still be initialized and accept new config
+      expect(plugin.isInitialized()).toBe(true);
+
+      // Re-initialize with new config (different key/region)
+      const newConfig: IntegrationConfig = {
+        ...validConfig,
+        config: {
+          accessKeyId: "AKIANEWKEYEXAMPLE", // pragma: allowlist secret
+          secretAccessKey: "newSecretKeyExample123", // pragma: allowlist secret
+          region: "eu-west-1",
+        },
+      };
+      await plugin.initialize(newConfig);
+      expect(plugin.isInitialized()).toBe(true);
+
+      // Now healthy: credential validation resolves with account identity
+      mockValidateCredentials.mockResolvedValue({
+        account: "987654321098",
+        arn: "arn:aws:iam::987654321098:user/newuser",
+        userId: "AIDANEWUSERID",
+      });
+      const healthy = await plugin.healthCheck();
+      expect(healthy.healthy).toBe(true);
+    });
+
+    it("should not crash when health check fails", async () => {
+      await plugin.initialize(validConfig);
+
+      mockValidateCredentials.mockRejectedValue(
+        new AWSAuthenticationError("Expired token")
+      );
+
+      // Should not throw; failure is reported via the returned status
+      const health = await plugin.healthCheck();
+      expect(health.healthy).toBe(false);
+
+      // Plugin still functional for other operations
+      expect(plugin.isInitialized()).toBe(true);
+      expect(plugin.getConfig().enabled).toBe(true);
+    });
+  });
+
+  describe("edge cases", () => {
+    it("should return unhealthy when plugin is not initialized", async () => {
+      const health = await plugin.healthCheck();
+
+      
expect(health.healthy).toBe(false);
+      expect(health.message).toBe("Plugin is not initialized");
+    });
+
+    it("should return unhealthy when plugin is disabled", async () => {
+      const disabledConfig: IntegrationConfig = {
+        ...validConfig,
+        enabled: false,
+      };
+      await plugin.initialize(disabledConfig);
+
+      const health = await plugin.healthCheck();
+
+      expect(health.healthy).toBe(false);
+      // BasePlugin checks initialized before enabled; disabled plugins don't set initialized=true
+      expect(health.message).toBe("Plugin is not initialized");
+    });
+  });
+});
diff --git a/backend/src/integrations/aws/types.ts b/backend/src/integrations/aws/types.ts
new file mode 100644
index 00000000..77a742f4
--- /dev/null
+++ b/backend/src/integrations/aws/types.ts
@@ -0,0 +1,111 @@
+/**
+ * AWS Integration Types
+ *
+ * Type definitions for the AWS EC2 integration plugin.
+ */
+
+import type { ProvisioningCapability } from "../types";
+
+export type { ProvisioningCapability };
+
+/**
+ * AWS configuration
+ */
+export interface AWSConfig {
+  accessKeyId?: string;
+  secretAccessKey?: string;
+  region?: string;
+  regions?: string[];
+  sessionToken?: string;
+  profile?: string;
+  endpoint?: string;
+}
+
+/**
+ * EC2 instance type information
+ */
+export interface InstanceTypeInfo {
+  instanceType: string;
+  vCpus: number;
+  memoryMiB: number;
+  architecture: string;
+  currentGeneration: boolean;
+}
+
+/**
+ * AMI (Amazon Machine Image) information
+ */
+export interface AMIInfo {
+  imageId: string;
+  name: string;
+  description?: string;
+  architecture: string;
+  ownerId: string;
+  state: string;
+  platform?: string;
+  creationDate?: string;
+}
+
+/**
+ * AMI filter for querying AMIs
+ */
+export interface AMIFilter {
+  name: string;
+  values: string[];
+}
+
+/**
+ * VPC information
+ */
+export interface VPCInfo {
+  vpcId: string;
+  cidrBlock: string;
+  state: string;
+  isDefault: boolean;
+  tags: Record<string, string>;
+}
+
+/**
+ * Subnet information
+ */
+export interface SubnetInfo {
+  subnetId: string;
+  vpcId: string;
+  cidrBlock: string;
+  availabilityZone: string;
+  availableIpAddressCount: number;
+  tags: Record<string, string>;
+}
+
+/**
+ * Security group information
+ */
+export interface SecurityGroupInfo {
+  groupId: string;
+  groupName: string;
+  description: string;
+  vpcId: string;
+  tags: Record<string, string>;
+}
+
+/**
+ * Key pair information
+ */
+export interface KeyPairInfo {
+  keyName: string;
+  keyPairId: string;
+  keyFingerprint: string;
+  keyType?: string;
+}
+
+/**
+ * AWS authentication error
+ *
+ * Thrown when AWS credentials are invalid, expired, or lack required IAM permissions.
+ */
+export class AWSAuthenticationError extends Error {
+  constructor(message: string) {
+    super(message);
+    this.name = "AWSAuthenticationError";
+  }
+}
diff --git a/backend/src/integrations/proxmox/ProxmoxClient.ts b/backend/src/integrations/proxmox/ProxmoxClient.ts
new file mode 100644
index 00000000..20de7547
--- /dev/null
+++ b/backend/src/integrations/proxmox/ProxmoxClient.ts
@@ -0,0 +1,553 @@
+/**
+ * Proxmox API Client
+ *
+ * Low-level HTTP client for communicating with the Proxmox VE API.
+ * Handles authentication, request/response transformation, and error handling.
+ */
+
+import * as https from "node:https";
+import * as http from "node:http";
+import { readFileSync } from "node:fs";
+
+import type { LoggerService } from "../../services/LoggerService";
+import type {
+  ProxmoxConfig,
+  ProxmoxTaskStatus,
+  RetryConfig,
+} from "./types";
+import {
+  ProxmoxError,
+  ProxmoxAuthenticationError,
+} from "./types";
+
+/**
+ * ProxmoxClient - HTTP client for Proxmox VE API
+ *
+ * Responsibilities:
+ * - Manage authentication (ticket-based and token-based)
+ * - Execute HTTP requests with proper headers
+ * - Handle authentication ticket refresh
+ * - Configure HTTPS agent with SSL options
+ * - Transform HTTP errors into domain-specific exceptions
+ */
+export class ProxmoxClient {
+  private baseUrl: string;
+  private config: ProxmoxConfig;
+  private logger: LoggerService;
+  private ticket?: string;
+  private csrfToken?: string;
+  private retryConfig: RetryConfig;
+  private httpsAgent?: https.Agent;
+
+  /**
+   * Create a new ProxmoxClient instance
+   *
+   * @param config - Proxmox configuration
+   * @param logger - Logger service instance
+   */
+  constructor(config: ProxmoxConfig, logger: LoggerService) {
+    this.config = config;
+    this.logger = logger;
+    this.baseUrl = `https://${config.host}:${String(config.port ?? 8006)}`;
+
+    // Build a per-client HTTPS agent with SSL options
+    // This allows self-signed certs without the process-wide NODE_TLS_REJECT_UNAUTHORIZED hack
+    const agentOptions: https.AgentOptions = {
+      keepAlive: true,
+    };
+
+    if (config.ssl) {
+      if (config.ssl.rejectUnauthorized === false) {
+        agentOptions.rejectUnauthorized = false;
+        this.logger.warn(
+          "Proxmox TLS certificate verification is disabled (ssl.rejectUnauthorized=false). " +
+            "Consider configuring a trusted CA certificate (PROXMOX_SSL_CA) instead.",
+          {
+            component: "ProxmoxClient",
+            operation: "constructor",
+          }
+        );
+      }
+
+      if (config.ssl.ca) {
+        try {
+          agentOptions.ca = readFileSync(config.ssl.ca);
+        } catch (err) {
+          this.logger.error("Failed to read Proxmox SSL CA file", {
+            component: "ProxmoxClient",
+            operation: "constructor",
+            metadata: { path: config.ssl.ca },
+          }, err instanceof Error ? err : undefined);
+        }
+      }
+
+      if (config.ssl.cert) {
+        try {
+          agentOptions.cert = readFileSync(config.ssl.cert);
+        } catch (err) {
+          this.logger.error("Failed to read Proxmox SSL cert file", {
+            component: "ProxmoxClient",
+            operation: "constructor",
+            metadata: { path: config.ssl.cert },
+          }, err instanceof Error ? err : undefined);
+        }
+      }
+
+      if (config.ssl.key) {
+        try {
+          agentOptions.key = readFileSync(config.ssl.key);
+        } catch (err) {
+          this.logger.error("Failed to read Proxmox SSL key file", {
+            component: "ProxmoxClient",
+            operation: "constructor",
+            metadata: { path: config.ssl.key },
+          }, err instanceof Error ? err : undefined);
+        }
+      }
+    }
+
+    this.httpsAgent = new https.Agent(agentOptions);
+
+    // Configure retry logic
+    this.retryConfig = {
+      maxAttempts: 3,
+      initialDelay: 1000,
+      maxDelay: 10000,
+      backoffMultiplier: 2,
+      retryableErrors: ["ECONNRESET", "ETIMEDOUT", "ENOTFOUND"],
+    };
+
+    this.logger.debug("ProxmoxClient initialized", {
+      component: "ProxmoxClient",
+      operation: "constructor",
+      metadata: {
+        host: config.host,
+        port: config.port ?? 8006,
+        tlsVerify: config.ssl?.rejectUnauthorized !== false,
+      },
+    });
+  }
+
+  /**
+   * Authenticate with the Proxmox API
+   *
+   * For token authentication: stores the token for use in Authorization header
+   * For password authentication: fetches and stores authentication ticket and CSRF token
+   *
+   * @throws {ProxmoxAuthenticationError} If authentication fails
+   */
+  async authenticate(): Promise<void> {
+    if (this.config.token) {
+      // Token authentication - no need to fetch ticket
+      this.logger.info("Using token authentication", {
+        component: "ProxmoxClient",
+        operation: "authenticate",
+      });
+      return;
+    }
+
+    // Password authentication - fetch ticket
+    const endpoint = "/api2/json/access/ticket";
+    const params = {
+      username: `${this.config.username ?? ""}@${this.config.realm ?? ""}`,
+      password: this.config.password,
+    };
+
+    try {
+      this.logger.debug("Authenticating with password", {
+        component: "ProxmoxClient",
+        operation: "authenticate",
+        metadata: {
+          username: this.config.username,
+          realm: this.config.realm,
+        },
+      });
+
+      const response = (await this.request(
+        "POST",
+        endpoint,
+        params,
+        false
+      )) as { ticket: string; CSRFPreventionToken: string };
+      this.ticket = response.ticket;
+      this.csrfToken = response.CSRFPreventionToken;
+
+      this.logger.info("Authentication successful", {
+        component: "ProxmoxClient",
+        operation: "authenticate",
+      });
+    } catch (error) {
+      this.logger.error(
+        "Failed to authenticate with Proxmox API",
+        {
+          component: "ProxmoxClient",
+          operation: "authenticate",
+        },
+        error instanceof Error ? error : undefined
+      );
+
+      throw new ProxmoxAuthenticationError(
+        "Failed to authenticate with Proxmox API",
+        error
+      );
+    }
+  }
+
+  /**
+   * Execute a GET request
+   *
+   * @param endpoint - API endpoint path
+   * @returns Response data
+   */
+  async get(endpoint: string): Promise<unknown> {
+    return await this.requestWithRetry("GET", endpoint);
+  }
+
+  /**
+   * Execute a POST request
+   *
+   * @param endpoint - API endpoint path
+   * @param data - Request body data
+   * @returns Task ID (UPID) for async operations
+   */
+  async post(endpoint: string, data: unknown): Promise<string> {
+    const response = await this.requestWithRetry("POST", endpoint, data);
+    // Proxmox returns task ID (UPID) for async operations
+    return response as string;
+  }
+
+  /**
+   * Execute a DELETE request
+   *
+   * @param endpoint - API endpoint path
+   * @returns Task ID (UPID) for async operations
+   */
+  async delete(endpoint: string): Promise<string> {
+    const response = await this.requestWithRetry("DELETE", endpoint);
+    return response as string;
+  }
+
+  /**
+   * Wait for a Proxmox task to complete
+   *
+   * Polls the task status endpoint until the task completes or times out.
+   *
+   * @param node - Node name where the task is running
+   * @param taskId - Task ID (UPID)
+   * @param timeout - Timeout in milliseconds (default: 300000 = 5 minutes)
+   * @throws {ProxmoxError} If task fails or times out
+   */
+  async waitForTask(
+    node: string,
+    taskId: string,
+    timeout = 300000
+  ): Promise<void> {
+    const startTime = Date.now();
+    const pollInterval = 2000; // 2 seconds
+
+    this.logger.debug("Waiting for task to complete", {
+      component: "ProxmoxClient",
+      operation: "waitForTask",
+      metadata: { node, taskId, timeout },
+    });
+
+    while (Date.now() - startTime < timeout) {
+      const endpoint = `/api2/json/nodes/${node}/tasks/${taskId}/status`;
+      const status = (await this.get(endpoint)) as ProxmoxTaskStatus;
+
+      if (status.status === "stopped") {
+        if (status.exitstatus === "OK") {
+          this.logger.info("Task completed successfully", {
+            component: "ProxmoxClient",
+            operation: "waitForTask",
+            metadata: { node, taskId },
+          });
+          return;
+        } else {
+          this.logger.error("Task failed", {
+            component: "ProxmoxClient",
+            operation: "waitForTask",
+            metadata: { node, taskId, exitstatus: status.exitstatus },
+          });
+
+          throw new ProxmoxError(
+            `Task failed: ${status.exitstatus ?? "unknown"}`,
+            "TASK_FAILED",
+            status
+          );
+        }
+      }
+
+      await this.sleep(pollInterval);
+    }
+
+    this.logger.error("Task timeout", {
+      component: "ProxmoxClient",
+      operation: "waitForTask",
+      metadata: { node, taskId, timeout },
+    });
+
+    throw new ProxmoxError(
+      `Task timeout after ${String(timeout)}ms`,
+      "TASK_TIMEOUT",
+      { taskId, node }
+    );
+  }
+
+  /**
+   * Execute a request with retry logic
+   *
+   * @param method - HTTP method
+   * @param endpoint - API endpoint path
+   * @param data - Optional request body data
+   * @returns Response data
+   */
+  private async requestWithRetry(
+    method: string,
+    endpoint: string,
+    data?: unknown
+  ): Promise<unknown> {
+    let lastError: Error | undefined;
+
+    for (let attempt = 1; attempt <= this.retryConfig.maxAttempts; attempt++) {
+      try {
+        return await this.request(method, endpoint, data);
+      } catch (error) {
+        lastError = error instanceof Error ? error : new Error(String(error));
+
+        // Don't retry authentication errors
+        if (error instanceof ProxmoxAuthenticationError) {
+          throw error;
+        }
+
+        // Don't retry 4xx errors except 429
+        if (error instanceof ProxmoxError && error.code.startsWith("HTTP_4")) {
+          if (error.code !== "HTTP_429") {
+            throw error;
+          }
+          // Handle rate limiting
+          const details = error.details as { retryAfter?: number } | undefined;
+          const retryAfter = details?.retryAfter ?? 5000;
+          await this.sleep(retryAfter);
+          continue;
+        }
+
+        // Check if error is retryable
+        const isRetryable = this.retryConfig.retryableErrors.some((errCode) =>
+          lastError?.message.includes(errCode)
+        );
+
+        if (!isRetryable || attempt === this.retryConfig.maxAttempts) {
+          throw error;
+        }
+
+        // Calculate backoff delay
+        const delay = Math.min(
+          this.retryConfig.initialDelay *
+            Math.pow(this.retryConfig.backoffMultiplier, attempt - 1),
+          this.retryConfig.maxDelay
+        );
+
+        this.logger.warn(
+          `Request failed, retrying (attempt ${String(attempt)}/${String(this.retryConfig.maxAttempts)})`,
+          {
+            component: "ProxmoxClient",
+            operation: "requestWithRetry",
+            metadata: { endpoint, attempt, delay },
+          }
+        );
+
+        await this.sleep(delay);
+      }
+    }
+
+    throw lastError ?? new Error("Request failed after retries");
+  }
+
+  /**
+   * Execute an HTTP request
+   *
+   * @param method - HTTP method
+   * @param endpoint - API endpoint path
+   * @param data - Optional request body data
+   * @param useAuth - Whether to include authentication (default: true)
+   * @returns Response data
+   */
+  private async request(
+    method: string,
+    endpoint: string,
+    data?: unknown,
+    useAuth = true
+  ): Promise<unknown> {
+    const url = `${this.baseUrl}${endpoint}`;
+    const headers: Record<string, string> = {};
+
+    // Proxmox API expects form-urlencoded for POST/PUT/DELETE, not JSON
+    let body: string | undefined;
+    if (data && method !== "GET") {
+      headers["Content-Type"] = "application/x-www-form-urlencoded";
+      const params = new URLSearchParams();
+      for (const [key, value] of Object.entries(data as Record<string, unknown>)) {
+        if (value !== undefined && value !== null) {
+          params.append(key, String(value));
+        }
+      }
+      body = params.toString();
+    } else {
+      headers["Content-Type"] = "application/json";
+      body = data ? JSON.stringify(data) : undefined;
+    }
+
+    // Add authentication
+    if (useAuth) {
+      if (this.config.token) {
+        headers.Authorization = `PVEAPIToken=${this.config.token}`;
+      } else if (this.ticket) {
+        headers.Cookie = `PVEAuthCookie=${this.ticket}`;
+        if (method !== "GET" && this.csrfToken) {
+          headers.CSRFPreventionToken = this.csrfToken;
+        }
+      }
+    }
+
+    try {
+      const response = await this.fetchWithTimeout(url, {
+        method,
+        headers,
+        body,
+      });
+
+      return await this.handleResponse(response);
+    } catch (error) {
+      // Handle ticket expiration
+      if (error instanceof ProxmoxAuthenticationError && this.ticket) {
+        this.logger.info("Authentication ticket expired, re-authenticating", {
+          component: "ProxmoxClient",
+          operation: "request",
+        });
+        await this.authenticate();
+        // Retry request with new ticket
+        return await this.request(method, endpoint, data, useAuth);
+      }
+      throw error;
+    }
+  }
+
+  /**
+   * Handle HTTP response
+   *
+   * @param response - Fetch response object
+   * @returns Response data
+   * @throws {ProxmoxError} For HTTP errors
+   * @throws {ProxmoxAuthenticationError} For authentication errors
+   */
+  private async handleResponse(response: Response): Promise<unknown> {
+    // Handle authentication errors
+    if (response.status === 401 || response.status === 403) {
+      throw new ProxmoxAuthenticationError("Authentication failed", {
+        status: response.status,
+      });
+    }
+
+    // Handle not found
+    if (response.status === 404) {
+      throw new ProxmoxError("Resource not found", "HTTP_404", {
+        status: response.status,
+      });
+    }
+
+    // Handle other errors
+    if (!response.ok) {
+      const errorText = await response.text();
+      // Include the body in the message for better diagnostics
+      const detail = errorText ? `: ${errorText}` : "";
+      throw new ProxmoxError(
+        `Proxmox API error: ${response.statusText}${detail}`,
+        `HTTP_${String(response.status)}`,
+        {
+          status: response.status,
+          statusText: response.statusText,
+          body: errorText,
+        }
+      );
+    }
+
+    // Parse JSON response
+    const json = (await response.json()) as { data: unknown };
+    return json.data; // Proxmox wraps responses in {data: ...}
+  }
+
+  /**
+   * Fetch with timeout using node:https for per-client TLS configuration
+   *
+   * Uses node:https.request instead of native fetch() because Node.js native fetch
+   * does not support per-request TLS options (rejectUnauthorized, custom CA, etc.).
+   * The custom https.Agent configured in the constructor carries the SSL settings.
+   *
+   * @param url - Request URL
+   * @param options - Fetch-like options (method, headers, body)
+   * @param timeout - Timeout in milliseconds (default: 30000)
+   * @returns A Response-compatible object
+   */
+  private async fetchWithTimeout(
+    url: string,
+    options: RequestInit,
+    timeout = 30000
+  ): Promise<Response> {
+    return new Promise<Response>((resolve, reject) => {
+      const parsed = new URL(url);
+      const isHttps = parsed.protocol === "https:";
+      const transport = isHttps ? https : http;
+
+      const reqOptions: https.RequestOptions = {
+        hostname: parsed.hostname,
+        port: parsed.port || (isHttps ? 443 : 80),
+        path: parsed.pathname + parsed.search,
+        method: options.method ?? "GET",
+        headers: options.headers as Record<string, string>,
+        timeout,
+        ...(isHttps && this.httpsAgent ? { agent: this.httpsAgent } : {}),
+      };
+
+      const req = transport.request(reqOptions, (res) => {
+        const chunks: Buffer[] = [];
+        res.on("data", (chunk: Buffer) => chunks.push(chunk));
+        res.on("end", () => {
+          const bodyText = Buffer.concat(chunks).toString("utf-8");
+          // Build a Response-compatible object so the rest of the client code is unchanged
+          const response = new Response(bodyText, {
+            status: res.statusCode ?? 500,
+            statusText: res.statusMessage ?? "",
+            headers: new Headers(res.headers as Record<string, string>),
+          });
+          resolve(response);
+        });
+      });
+
+      req.on("timeout", () => {
+        req.destroy();
+        reject(new Error("Request timed out"));
+      });
+
+      req.on("error", (err) => {
+        reject(err);
+      });
+
+      if (options.body) {
+        req.write(options.body);
+      }
+      req.end();
+    });
+  }
+
+  /**
+   * Sleep for a specified duration
+   *
+   * @param ms - Duration in milliseconds
+   */
+  private sleep(ms: number): Promise<void> {
+    return new Promise((resolve) => {
+      setTimeout(resolve, ms);
+    });
+  }
+}
diff --git a/backend/src/integrations/proxmox/ProxmoxIntegration.ts b/backend/src/integrations/proxmox/ProxmoxIntegration.ts
new file mode 100644
index 00000000..f3d82db1
--- /dev/null
+++ b/backend/src/integrations/proxmox/ProxmoxIntegration.ts
@@ -0,0 +1,495 @@
+/**
+ * Proxmox Integration Plugin
+ *
+ * Plugin class that integrates Proxmox Virtual Environment into Pabawi.
+ * Implements both InformationSourcePlugin and ExecutionToolPlugin interfaces.
+ */
+
+import { BasePlugin } from "../BasePlugin";
+import type {
+  HealthStatus,
+  InformationSourcePlugin,
+  ExecutionToolPlugin,
+  NodeGroup,
+  Capability,
+  Action,
+} from "../types";
+import type { Node, Facts, ExecutionResult } from "../bolt/types";
+import type { LoggerService } from "../../services/LoggerService";
+import type { PerformanceMonitorService } from "../../services/PerformanceMonitorService";
+import type { JournalService } from "../../services/journal/JournalService";
+import type { CreateJournalEntry } from "../../services/journal/types";
+import { ProxmoxService } from "./ProxmoxService";
+import type { ProxmoxConfig, ProvisioningCapability } from "./types";
+
+/**
+ * ProxmoxIntegration - Plugin for Proxmox Virtual Environment
+ *
+ * Provides:
+ * - Inventory discovery of VMs and containers
+ * - Group management (by node, status, type)
+ * - Facts retrieval for guests
+ * - Lifecycle actions (start, stop, shutdown, reboot, suspend, resume)
+ * - Provisioning capabilities
(create/destroy VMs and containers) + * + * Validates: Requirements 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 2.1-2.6, 4.1, 16.1-16.6 + */ +export class ProxmoxIntegration + extends BasePlugin + implements InformationSourcePlugin, ExecutionToolPlugin +{ + type = "both" as const; + private service?: ProxmoxService; + private journalService?: JournalService; + + /** + * Create a new ProxmoxIntegration instance + * + * @param logger - Logger service instance (optional) + * @param performanceMonitor - Performance monitor service instance (optional) + */ + constructor( + logger?: LoggerService, + performanceMonitor?: PerformanceMonitorService + ) { + super("proxmox", "both", logger, performanceMonitor); + + this.logger.debug("ProxmoxIntegration created", { + component: "ProxmoxIntegration", + operation: "constructor", + }); + } + + /** + * Perform plugin-specific initialization + * + * Validates Proxmox configuration and initializes ProxmoxService. + * Logs security warning if SSL certificate verification is disabled. 
+ * + * Validates: Requirements 1.4, 1.5, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 16.1-16.6 + * + * @throws Error if configuration is invalid + */ + protected async performInitialization(): Promise { + this.logger.info("Initializing Proxmox integration", { + component: "ProxmoxIntegration", + operation: "performInitialization", + }); + + // Extract and validate Proxmox configuration + const config = this.config.config as unknown as ProxmoxConfig; + this.validateProxmoxConfig(config); + + // Initialize service with configuration + this.service = new ProxmoxService( + config, + this.logger, + this.performanceMonitor + ); + await this.service.initialize(); + + this.logger.info("Proxmox integration initialized successfully", { + component: "ProxmoxIntegration", + operation: "performInitialization", + }); + } + + /** + * Validate Proxmox configuration + * + * Validates: + * - Host is a valid hostname or IP address + * - Port is in valid range (1-65535) + * - Either password or token authentication is configured + * - Realm is provided for password authentication + * - Logs security warning if SSL verification is disabled + * + * Validates: Requirements 2.3, 2.4, 2.6, 16.1, 16.2, 16.3, 16.4, 16.5, 16.6 + * + * @param config - Proxmox configuration to validate + * @throws Error if configuration is invalid + * @private + */ + private validateProxmoxConfig(config: ProxmoxConfig): void { + this.logger.debug("Validating Proxmox configuration", { + component: "ProxmoxIntegration", + operation: "validateProxmoxConfig", + }); + + // Validate host (hostname or IP) + if (!config.host || typeof config.host !== "string") { + throw new Error("Proxmox configuration must include a valid host"); + } + + // Validate port range + if (config.port !== undefined) { + if (typeof config.port !== "number" || config.port < 1 || config.port > 65535) { + throw new Error("Proxmox port must be between 1 and 65535"); + } + } + + // Validate authentication - either token or password must be provided + if 
(!config.token && !config.password) { + throw new Error( + "Proxmox configuration must include either token or password authentication" + ); + } + + // Validate realm for password authentication + if (config.password && !config.realm) { + throw new Error( + "Proxmox password authentication requires a realm" + ); + } + + // Log security warning if cert verification disabled + if (config.ssl?.rejectUnauthorized === false) { + this.logger.warn( + "TLS certificate verification is disabled - this is insecure", + { + component: "ProxmoxIntegration", + operation: "validateProxmoxConfig", + } + ); + } + + this.logger.debug("Proxmox configuration validated successfully", { + component: "ProxmoxIntegration", + operation: "validateProxmoxConfig", + }); + } + + /** + * Perform plugin-specific health check + * + * Delegates to ProxmoxService to check API connectivity. + * Returns healthy if API is reachable, degraded if authentication fails, + * and unhealthy if API is unreachable. + * + * Validates: Requirements 4.1, 4.2, 4.3, 4.4, 4.5 + * + * @returns Health status (without lastCheck timestamp) + */ + protected async performHealthCheck(): Promise< + Omit + > { + if (!this.service) { + return { + healthy: false, + message: "ProxmoxService not initialized", + }; + } + + return await this.service.healthCheck(); + } + + // ======================================== + // InformationSourcePlugin Interface Methods + // ======================================== + + /** + * Get inventory of all VMs and containers + * + * Delegates to ProxmoxService to retrieve all guests from the Proxmox cluster. + * Results are cached for 60 seconds to reduce API load. 
+ * + * Validates: Requirements 5.1-5.7, 14.3, 14.4, 16.1, 16.2, 16.3 + * + * @param computeType - Optional filter: "qemu" for VMs only, "lxc" for containers only + * @returns Array of Node objects representing all guests (or filtered subset) + * @throws Error if service is not initialized or API call fails + */ + async getInventory(computeType?: "qemu" | "lxc"): Promise { + this.ensureInitialized(); + return await this.service!.getInventory(computeType); + } + + /** + * Get groups of VMs and containers + * + * Delegates to ProxmoxService to create NodeGroup objects organized by + * Proxmox node, status, and type. Results are cached for 60 seconds. + * + * Validates: Requirements 6.1-6.7 + * + * @returns Array of NodeGroup objects + * @throws Error if service is not initialized or API call fails + */ + async getGroups(): Promise { + this.ensureInitialized(); + return await this.service!.getGroups(); + } + + /** + * Get detailed facts for a specific guest + * + * Delegates to ProxmoxService to retrieve configuration and status information + * for a VM or container. Results are cached for 30 seconds. + * + * Validates: Requirements 7.1-7.7 + * + * @param nodeId - Node identifier in format proxmox:{node}:{vmid} + * @returns Facts object with CPU, memory, disk, network config and current usage + * @throws Error if service is not initialized, nodeId format is invalid, or guest doesn't exist + */ + async getNodeFacts(nodeId: string): Promise { + this.ensureInitialized(); + return await this.service!.getNodeFacts(nodeId); + } + + /** + * Get arbitrary data for a node + * + * Proxmox integration does not support additional data types beyond facts. + * This method returns null for all data type requests. 
+ * + * @param nodeId - Node identifier + * @param dataType - Type of data to retrieve + * @returns null (no additional data types supported) + */ + async getNodeData(_nodeId: string, _dataType: string): Promise { + this.ensureInitialized(); + + // Proxmox integration doesn't support additional data types beyond facts + // Return null to indicate no data available for the requested type + return null; + } + + // ======================================== + // ExecutionToolPlugin Interface Methods + // ======================================== + + /** + * Execute an action on a guest or provision new infrastructure + * + * Delegates to ProxmoxService to execute lifecycle actions (start, stop, shutdown, + * reboot, suspend, resume) or provisioning actions (create_vm, create_lxc, + * destroy_vm, destroy_lxc). + * + * Validates: Requirements 8.1-8.10, 9.3, 9.4, 10.1-10.7, 11.1-11.7, 12.1-12.7 + * + * @param action - Action to execute + * @returns ExecutionResult with success/error details + * @throws Error if service is not initialized or action is invalid + */ + async executeAction(action: Action): Promise { + this.ensureInitialized(); + + const target = Array.isArray(action.target) ? action.target[0] : action.target; + + try { + const result = await this.service!.executeAction(action); + await this.recordJournal(action, target, result); + return result; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + await this.recordJournalFailure(action, target, errorMessage); + throw error; + } + } + + /** + * List lifecycle action capabilities + * + * Returns all lifecycle actions that can be performed on VMs and containers. 
+ * + * Validates: Requirements 8.1, 8.2 + * + * @returns Array of Capability objects + */ + listCapabilities(): Capability[] { + this.ensureInitialized(); + return this.service!.listCapabilities(); + } + + /** + * List provisioning capabilities + * + * Returns all provisioning capabilities supported by this integration, + * including VM and LXC creation and destruction. + * + * Validates: Requirements 9.3, 9.4 + * + * @returns Array of ProvisioningCapability objects + */ + listProvisioningCapabilities(): ProvisioningCapability[] { + this.ensureInitialized(); + return this.service!.listProvisioningCapabilities(); + } + + /** + * Get list of PVE nodes in the cluster + */ + async getNodes(): Promise<{ node: string; status: string; maxcpu?: number; maxmem?: number }[]> { + this.ensureInitialized(); + return this.service!.getNodes(); + } + + /** + * Get the next available VMID + */ + async getNextVMID(): Promise { + this.ensureInitialized(); + return this.service!.getNextVMID(); + } + + /** + * Get ISO images available on a node + */ + async getISOImages(node: string, storage?: string): Promise<{ volid: string; format: string; size: number }[]> { + this.ensureInitialized(); + return this.service!.getISOImages(node, storage); + } + + /** + * Get OS templates available on a node + */ + async getTemplates(node: string, storage?: string): Promise<{ volid: string; format: string; size: number }[]> { + this.ensureInitialized(); + return this.service!.getTemplates(node, storage); + } + + async getStorages(node: string, contentType?: string): Promise<{ storage: string; type: string; content: string; active: number; total?: number; used?: number; avail?: number }[]> { + this.ensureInitialized(); + return this.service!.getStorages(node, contentType); + } + + async getNetworkBridges(node: string, type?: string): Promise<{ iface: string; type: string; active: number; address?: string; cidr?: string; bridge_ports?: string }[]> { + this.ensureInitialized(); + return 
this.service!.getNetworkBridges(node, type); + } + + // ======================================== + // Journal Integration + // ======================================== + + /** + * Set the JournalService for recording events + * + * @param journalService - JournalService instance + */ + setJournalService(journalService: JournalService): void { + this.journalService = journalService; + } + + /** + * Record a journal entry for a completed action (success or failure). + * Validates: Requirements 10.4, 11.4, 22.1, 22.2, 22.3, 25.1 + */ + private async recordJournal( + action: Action, + target: string, + result: ExecutionResult + ): Promise { + if (!this.journalService) return; + + const eventType = this.mapActionToEventType(action.action); + const entry: CreateJournalEntry = { + nodeId: target, + nodeUri: `proxmox:${target}`, + eventType, + source: "proxmox", + action: action.action, + summary: + result.status === "success" + ? `Proxmox ${action.action} succeeded on ${target}` + : `Proxmox ${action.action} failed on ${target}: ${result.error ?? "unknown error"}`, + details: { + status: result.status, + parameters: action.parameters, + ...(result.error ? { error: result.error } : {}), + }, + }; + + try { + await this.journalService.recordEvent(entry); + } catch (err) { + this.logger.error("Failed to record journal entry", { + component: "ProxmoxIntegration", + operation: "recordJournal", + metadata: { error: err instanceof Error ? err.message : String(err) }, + }); + } + } + + /** + * Record a journal entry for a failure that throws. 
+ */ + private async recordJournalFailure( + action: Action, + target: string, + errorMessage: string + ): Promise { + if (!this.journalService) return; + + const eventType = this.mapActionToEventType(action.action); + const entry: CreateJournalEntry = { + nodeId: target, + nodeUri: `proxmox:${target}`, + eventType, + source: "proxmox", + action: action.action, + summary: `Proxmox ${action.action} failed on ${target}: ${errorMessage}`, + details: { + status: "failed", + parameters: action.parameters, + error: errorMessage, + }, + }; + + try { + await this.journalService.recordEvent(entry); + } catch (err) { + this.logger.error("Failed to record journal entry", { + component: "ProxmoxIntegration", + operation: "recordJournalFailure", + metadata: { error: err instanceof Error ? err.message : String(err) }, + }); + } + } + + /** + * Map an action name to a JournalEventType + */ + private mapActionToEventType( + actionName: string + ): "provision" | "destroy" | "start" | "stop" | "reboot" | "suspend" | "resume" | "info" { + switch (actionName) { + case "create_vm": + case "create_lxc": + return "provision"; + case "destroy_vm": + case "destroy_lxc": + return "destroy"; + case "start": + return "start"; + case "stop": + case "shutdown": + return "stop"; + case "reboot": + return "reboot"; + case "suspend": + return "suspend"; + case "resume": + return "resume"; + default: + return "info"; + } + } + + // ======================================== + // Helper Methods + // ======================================== + + /** + * Ensure the plugin is initialized + * + * @throws Error if plugin is not initialized + * @private + */ + private ensureInitialized(): void { + if (!this.initialized || !this.service) { + throw new Error("Proxmox integration is not initialized"); + } + } +} diff --git a/backend/src/integrations/proxmox/ProxmoxService.ts b/backend/src/integrations/proxmox/ProxmoxService.ts new file mode 100644 index 00000000..9b7f7842 --- /dev/null +++ 
b/backend/src/integrations/proxmox/ProxmoxService.ts @@ -0,0 +1,2033 @@ +/** + * Proxmox Service + * + * Business logic layer for the Proxmox VE integration. + * Orchestrates API calls through ProxmoxClient and handles data transformation. + */ + +import type { LoggerService } from "../../services/LoggerService"; +import type { PerformanceMonitorService } from "../../services/PerformanceMonitorService"; +import type { HealthStatus, NodeGroup, Action, Capability } from "../types"; +import type { Node, Facts, ExecutionResult } from "../bolt/types"; +import { SimpleCache } from "../../utils/caching"; +import { ProxmoxClient } from "./ProxmoxClient"; +import type { + ProxmoxConfig, + ProxmoxGuest, + ProxmoxGuestConfig, + ProxmoxGuestStatus, + VMCreateParams, + LXCCreateParams, + ProvisioningCapability +} from "./types"; +import { ProxmoxAuthenticationError } from "./types"; + +/** + * ProxmoxService - Business logic layer for Proxmox integration + * + * Responsibilities: + * - Orchestrate API calls through ProxmoxClient + * - Transform Proxmox API responses to Pabawi data models + * - Implement caching strategy for inventory, groups, and facts + * - Handle data aggregation and grouping logic + * - Manage provisioning operations (create/destroy VMs and containers) + */ +export class ProxmoxService { + private client?: ProxmoxClient; + private cache: SimpleCache; + private logger: LoggerService; + private performanceMonitor: PerformanceMonitorService; + private config: ProxmoxConfig; + + /** + * Create a new ProxmoxService instance + * + * @param config - Proxmox configuration + * @param logger - Logger service instance + * @param performanceMonitor - Performance monitor service instance + */ + constructor( + config: ProxmoxConfig, + logger: LoggerService, + performanceMonitor: PerformanceMonitorService + ) { + this.config = config; + this.logger = logger; + this.performanceMonitor = performanceMonitor; + this.cache = new SimpleCache({ ttl: 60000 }); // Default 60s TTL + 
+ this.logger.debug("ProxmoxService created", { + component: "ProxmoxService", + operation: "constructor", + }); + } + + /** + * Initialize the service + * + * Creates ProxmoxClient and authenticates with the Proxmox API. + */ + async initialize(): Promise { + this.logger.info("Initializing ProxmoxService", { + component: "ProxmoxService", + operation: "initialize", + }); + + this.client = new ProxmoxClient(this.config, this.logger); + await this.client.authenticate(); + + this.logger.info("ProxmoxService initialized successfully", { + component: "ProxmoxService", + operation: "initialize", + }); + } + + /** + * Perform health check + * + * Queries the Proxmox API version endpoint to verify connectivity. + * Returns healthy status if API is reachable, degraded if authentication fails, + * and unhealthy if API is unreachable. + * + * @returns Health status (without lastCheck timestamp) + */ + async healthCheck(): Promise> { + if (!this.client) { + return { + healthy: false, + message: "ProxmoxClient not initialized", + }; + } + + try { + this.logger.debug("Performing health check", { + component: "ProxmoxService", + operation: "healthCheck", + }); + + const version = await this.client.get("/api2/json/version"); + + this.logger.info("Health check successful", { + component: "ProxmoxService", + operation: "healthCheck", + metadata: { version }, + }); + + return { + healthy: true, + message: "Proxmox API is reachable", + details: { + version, + host: this.config.host, + port: this.config.port ?? 8006, + hasTokenAuth: !!this.config.token, + hasPasswordAuth: !!this.config.password, + sslRejectUnauthorized: this.config.ssl?.rejectUnauthorized ?? 
true, + }, + }; + } catch (error) { + if (error instanceof ProxmoxAuthenticationError) { + this.logger.warn("Health check failed: authentication error", { + component: "ProxmoxService", + operation: "healthCheck", + metadata: { error: error.message }, + }); + + return { + healthy: false, + degraded: true, + message: "Authentication failed", + details: { + error: error.message, + host: this.config.host, + port: this.config.port ?? 8006, + hasTokenAuth: !!this.config.token, + hasPasswordAuth: !!this.config.password, + sslRejectUnauthorized: this.config.ssl?.rejectUnauthorized ?? true, + }, + }; + } + + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Health check failed: API unreachable", + { + component: "ProxmoxService", + operation: "healthCheck", + metadata: { error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + return { + healthy: false, + message: "Proxmox API is unreachable", + details: { + error: errorMessage, + host: this.config.host, + port: this.config.port ?? 8006, + hasTokenAuth: !!this.config.token, + hasPasswordAuth: !!this.config.password, + sslRejectUnauthorized: this.config.ssl?.rejectUnauthorized ?? true, + }, + }; + } + } + + /** + * Get inventory of all VMs and containers + * + * Queries the Proxmox cluster resources endpoint for all guests (VMs and containers). + * Results are cached for 60 seconds to reduce API load. 
+ * + * @param computeType - Optional filter: "qemu" for VMs only, "lxc" for containers only + * @returns Array of Node objects representing all guests (or filtered subset) + * @throws Error if client is not initialized or API call fails + */ + async getInventory(computeType?: "qemu" | "lxc"): Promise { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + const cacheKey = "inventory:all"; + const cached = this.cache.get(cacheKey); + if (cached) { + let result = cached as Node[]; + if (computeType) { + const filteredComputeType = computeType === "qemu" ? "vm" : "lxc"; + result = result.filter( + (n) => (n as Node & { computeType?: string }).computeType === filteredComputeType + ); + } + this.logger.debug("Returning cached inventory", { + component: "ProxmoxService", + operation: "getInventory", + metadata: { nodeCount: result.length }, + }); + return result; + } + + const complete = this.performanceMonitor.startTimer("proxmox:getInventory"); + + try { + this.logger.debug("Fetching inventory from Proxmox API", { + component: "ProxmoxService", + operation: "getInventory", + }); + + // Query all cluster resources (VMs and containers) + const resources = await this.client.get( + "/api2/json/cluster/resources?type=vm" + ); + + if (!Array.isArray(resources)) { + throw new Error("Unexpected response format from Proxmox API"); + } + + // Transform each guest to a Node object, filtering out templates + const nodes = resources + .filter((guest) => { + const proxmoxGuest = guest as ProxmoxGuest; + // Filter out templates (template === 1) + if (proxmoxGuest.template === 1) { + this.logger.debug("Skipping template", { + component: "ProxmoxService", + operation: "getInventory", + metadata: { vmid: proxmoxGuest.vmid, name: proxmoxGuest.name }, + }); + return false; + } + return true; + }) + .map((guest) => + this.transformGuestToNode(guest as ProxmoxGuest) + ); + + // Apply computeType filter if specified + if (computeType) { + const 
filteredComputeType = computeType === "qemu" ? "vm" : "lxc"; + const filtered = nodes.filter( + (n) => (n as Node & { computeType?: string }).computeType === filteredComputeType + ); + + // Cache the full set, return filtered + this.cache.set(cacheKey, nodes, 60000); + + this.logger.info("Inventory fetched and filtered successfully", { + component: "ProxmoxService", + operation: "getInventory", + metadata: { totalCount: nodes.length, filteredCount: filtered.length, computeType, cached: false }, + }); + + complete({ cached: false, nodeCount: filtered.length }); + return filtered; + } + + // Cache for 60 seconds + this.cache.set(cacheKey, nodes, 60000); + + this.logger.info("Inventory fetched successfully", { + component: "ProxmoxService", + operation: "getInventory", + metadata: { nodeCount: nodes.length, cached: false }, + }); + + complete({ cached: false, nodeCount: nodes.length }); + + return nodes; + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Failed to fetch inventory", + { + component: "ProxmoxService", + operation: "getInventory", + metadata: { error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + complete({ error: errorMessage }); + throw error; + } + } + + /** + * Transform a Proxmox guest to a Node object + * + * Converts Proxmox API guest data to Pabawi's Node format. 
+ * Node ID format: proxmox:{node}:{vmid} + * + * @param guest - Proxmox guest object from API + * @returns Node object with standardized fields + * @private + */ + private transformGuestToNode(guest: ProxmoxGuest): Node { + // Node ID format: proxmox:{node}:{vmid} + const nodeId = `proxmox:${guest.node}:${guest.vmid}`; + + // Build metadata object + const metadata: Record = { + vmid: guest.vmid, + node: guest.node, + type: guest.type, + status: guest.status, + }; + + // Add optional fields if present + if (guest.maxmem !== undefined) { + metadata.maxmem = guest.maxmem; + } + if (guest.maxdisk !== undefined) { + metadata.maxdisk = guest.maxdisk; + } + if (guest.cpus !== undefined) { + metadata.cpus = guest.cpus; + } + if (guest.uptime !== undefined) { + metadata.uptime = guest.uptime; + } + + // Create Node object + const node: Node = { + id: nodeId, + name: guest.name, + uri: `proxmox://${guest.node}/${guest.vmid}`, + transport: "ssh" as const, // Default transport, can be overridden + config: {}, + source: "proxmox", + }; + + // Add computeType field: "qemu" → "vm", "lxc" → "lxc" + const computeType = guest.type === "qemu" ? "vm" : "lxc"; + + // Add metadata + (node as Node & { metadata?: Record }).metadata = metadata; + + // Add computeType to the node + (node as Node & { computeType?: string }).computeType = computeType; + + // Add status if available (map to a custom field since Node doesn't have status) + if (guest.status) { + (node as Node & { status?: string }).status = guest.status; + } + + this.logger.debug("Transformed guest to node", { + component: "ProxmoxService", + operation: "transformGuestToNode", + metadata: { vmid: guest.vmid, nodeId }, + }); + + return node; + } + + /** + * Get groups of VMs and containers + * + * Creates NodeGroup objects organized by Proxmox node, status, and type. + * Results are cached for 60 seconds to reduce API load. 
+ * + * @returns Array of NodeGroup objects + * @throws Error if client is not initialized or API call fails + */ + async getGroups(): Promise { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + const cacheKey = "groups:all"; + const cached = this.cache.get(cacheKey); + if (cached) { + this.logger.debug("Returning cached groups", { + component: "ProxmoxService", + operation: "getGroups", + metadata: { groupCount: (cached as NodeGroup[]).length }, + }); + return cached as NodeGroup[]; + } + + try { + this.logger.debug("Building groups from inventory", { + component: "ProxmoxService", + operation: "getGroups", + }); + + // Reuse inventory data + const inventory = await this.getInventory(); + const groups: NodeGroup[] = []; + + // Group by node + const nodeGroups = this.groupByNode(inventory); + groups.push(...nodeGroups); + + // Group by status + const statusGroups = this.groupByStatus(inventory); + groups.push(...statusGroups); + + // Group by type (VM vs LXC) + const typeGroups = this.groupByType(inventory); + groups.push(...typeGroups); + + // Cache for 60 seconds + this.cache.set(cacheKey, groups, 60000); + + this.logger.info("Groups built successfully", { + component: "ProxmoxService", + operation: "getGroups", + metadata: { groupCount: groups.length, cached: false }, + }); + + return groups; + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Failed to build groups", + { + component: "ProxmoxService", + operation: "getGroups", + metadata: { error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + throw error; + } + } + + /** + * Group nodes by Proxmox node + * + * Creates one NodeGroup per physical Proxmox node. 
+ * Group ID format: proxmox:node:{nodename} + * + * @param nodes - Array of Node objects from inventory + * @returns Array of NodeGroup objects grouped by node + * @private + */ + private groupByNode(nodes: Node[]): NodeGroup[] { + const nodeMap = new Map(); + + // Group nodes by their Proxmox node + for (const node of nodes) { + const proxmoxNode = (node as Node & { metadata?: Record }) + .metadata?.node as string; + + if (!proxmoxNode) { + continue; + } + + if (!nodeMap.has(proxmoxNode)) { + nodeMap.set(proxmoxNode, []); + } + nodeMap.get(proxmoxNode)!.push(node); + } + + // Create NodeGroup objects + const groups: NodeGroup[] = []; + for (const [nodeName, nodeList] of nodeMap.entries()) { + groups.push({ + id: `proxmox:node:${nodeName}`, + name: `Proxmox Node: ${nodeName}`, + source: "proxmox", + sources: ["proxmox"], + linked: false, + nodes: nodeList.map((n) => n.id), + metadata: { + description: `All guests on Proxmox node ${nodeName}`, + nodeType: "physical", + }, + }); + } + + this.logger.debug("Grouped nodes by Proxmox node", { + component: "ProxmoxService", + operation: "groupByNode", + metadata: { groupCount: groups.length }, + }); + + return groups; + } + + /** + * Group nodes by status + * + * Creates one NodeGroup per status type (running, stopped, paused). 
+ * Group ID format: proxmox:status:{status} + * + * @param nodes - Array of Node objects from inventory + * @returns Array of NodeGroup objects grouped by status + * @private + */ + private groupByStatus(nodes: Node[]): NodeGroup[] { + const statusMap = new Map(); + + // Group nodes by their status + for (const node of nodes) { + const status = (node as Node & { status?: string }).status; + + if (!status) { + continue; + } + + if (!statusMap.has(status)) { + statusMap.set(status, []); + } + statusMap.get(status)!.push(node); + } + + // Create NodeGroup objects + const groups: NodeGroup[] = []; + for (const [status, nodeList] of statusMap.entries()) { + groups.push({ + id: `proxmox:status:${status}`, + name: `Status: ${status}`, + source: "proxmox", + sources: ["proxmox"], + linked: false, + nodes: nodeList.map((n) => n.id), + metadata: { + description: `All guests with status ${status}`, + statusType: status, + }, + }); + } + + this.logger.debug("Grouped nodes by status", { + component: "ProxmoxService", + operation: "groupByStatus", + metadata: { groupCount: groups.length }, + }); + + return groups; + } + + /** + * Group nodes by type + * + * Creates one NodeGroup per guest type (qemu for VMs, lxc for containers). + * Group ID format: proxmox:type:{type} + * + * @param nodes - Array of Node objects from inventory + * @returns Array of NodeGroup objects grouped by type + * @private + */ + private groupByType(nodes: Node[]): NodeGroup[] { + const typeMap = new Map(); + + // Group nodes by their type + for (const node of nodes) { + const type = (node as Node & { metadata?: Record }) + .metadata?.type as string; + + if (!type) { + continue; + } + + if (!typeMap.has(type)) { + typeMap.set(type, []); + } + typeMap.get(type)!.push(node); + } + + // Create NodeGroup objects + const groups: NodeGroup[] = []; + for (const [type, nodeList] of typeMap.entries()) { + const displayName = type === "qemu" ? 
"Proxmox VMs" : "Proxmox Containers"; + groups.push({ + id: `proxmox:type:${type}`, + name: displayName, + source: "proxmox", + sources: ["proxmox"], + linked: false, + nodes: nodeList.map((n) => n.id), + metadata: { + description: `All ${displayName.toLowerCase()}`, + guestType: type, + }, + }); + } + + this.logger.debug("Grouped nodes by type", { + component: "ProxmoxService", + operation: "groupByType", + metadata: { groupCount: groups.length }, + }); + + return groups; + } + + /** + * Get detailed facts for a specific guest + * + * Retrieves configuration and status information for a VM or container. + * Results are cached for 30 seconds to reduce API load. + * + * Node ID format: proxmox:{node}:{vmid} + * + * @param nodeId - Node identifier in format proxmox:{node}:{vmid} + * @returns Facts object with CPU, memory, disk, network config and current usage + * @throws Error if client is not initialized, nodeId format is invalid, or guest doesn't exist + */ + async getNodeFacts(nodeId: string): Promise { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + const cacheKey = `facts:${nodeId}`; + const cached = this.cache.get(cacheKey); + if (cached) { + this.logger.debug("Returning cached facts", { + component: "ProxmoxService", + operation: "getNodeFacts", + metadata: { nodeId }, + }); + return cached as Facts; + } + + try { + this.logger.debug("Fetching facts from Proxmox API", { + component: "ProxmoxService", + operation: "getNodeFacts", + metadata: { nodeId }, + }); + + // Parse VMID and node name from nodeId (format: "proxmox:{node}:{vmid}") + const vmid = this.parseVMID(nodeId); + const node = this.parseNodeName(nodeId); + + // Determine guest type (qemu or lxc) + const guestType = await this.getGuestType(node, vmid); + + // Fetch configuration + const configEndpoint = + guestType === "lxc" + ? 
`/api2/json/nodes/${node}/lxc/${vmid}/config` + : `/api2/json/nodes/${node}/qemu/${vmid}/config`; + + const config = await this.client.get(configEndpoint); + + // Fetch current status + const statusEndpoint = + guestType === "lxc" + ? `/api2/json/nodes/${node}/lxc/${vmid}/status/current` + : `/api2/json/nodes/${node}/qemu/${vmid}/status/current`; + + const status = await this.client.get(statusEndpoint); + + // Transform to Facts object + const facts = this.transformToFacts(nodeId, config, status, guestType); + + // Cache for 30 seconds + this.cache.set(cacheKey, facts, 30000); + + this.logger.info("Facts fetched successfully", { + component: "ProxmoxService", + operation: "getNodeFacts", + metadata: { nodeId, guestType, cached: false }, + }); + + return facts; + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Failed to fetch facts", + { + component: "ProxmoxService", + operation: "getNodeFacts", + metadata: { nodeId, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + throw error; + } + } + + /** + * Parse VMID from nodeId + * + * Extracts the VMID from a nodeId in format proxmox:{node}:{vmid} + * + * @param nodeId - Node identifier + * @returns VMID as number + * @throws Error if nodeId format is invalid + * @private + */ + private parseVMID(nodeId: string): number { + const parts = nodeId.split(":"); + if (parts.length !== 3 || parts[0] !== "proxmox") { + throw new Error( + `Invalid nodeId format: ${nodeId}. 
Expected format: proxmox:{node}:{vmid}` + ); + } + + const vmid = parseInt(parts[2], 10); + if (isNaN(vmid)) { + throw new Error(`Invalid VMID in nodeId: ${nodeId}`); + } + + return vmid; + } + + /** + * Parse node name from nodeId + * + * Extracts the Proxmox node name from a nodeId in format proxmox:{node}:{vmid} + * + * @param nodeId - Node identifier + * @returns Proxmox node name + * @throws Error if nodeId format is invalid + * @private + */ + private parseNodeName(nodeId: string): string { + const parts = nodeId.split(":"); + if (parts.length !== 3 || parts[0] !== "proxmox") { + throw new Error( + `Invalid nodeId format: ${nodeId}. Expected format: proxmox:{node}:{vmid}` + ); + } + + return parts[1]; + } + + /** + * Determine guest type (qemu or lxc) + * + * Queries the cluster resources to determine if a guest is a VM (qemu) or container (lxc). + * This is necessary because we need to know the type to construct the correct API endpoints. + * + * @param node - Proxmox node name + * @param vmid - Guest VMID + * @returns Guest type ('qemu' or 'lxc') + * @throws Error if guest doesn't exist + * @private + */ + private async getGuestType( + node: string, + vmid: number + ): Promise<"qemu" | "lxc"> { + // Query cluster resources to find the guest + const resources = await this.client!.get( + "/api2/json/cluster/resources?type=vm" + ); + + if (!Array.isArray(resources)) { + throw new Error("Unexpected response format from Proxmox API"); + } + + // Find the guest by node and vmid + const guest = resources.find( + (r: ProxmoxGuest) => r.node === node && r.vmid === vmid + ); + + if (!guest) { + throw new Error( + `Guest with VMID ${vmid} not found on node ${node}` + ); + } + + return (guest as ProxmoxGuest).type; + } + + /** + * Transform Proxmox config and status to Facts object + * + * Converts Proxmox API responses to Pabawi's Facts format. + * Includes CPU, memory, disk, and network configuration. + * Includes current usage when guest is running. 
+ * + * @param nodeId - Node identifier + * @param config - Guest configuration from Proxmox API + * @param status - Guest status from Proxmox API + * @param guestType - Guest type ('qemu' or 'lxc') + * @returns Facts object + * @private + */ + private transformToFacts( + nodeId: string, + config: unknown, + status: unknown, + guestType: "qemu" | "lxc" + ): Facts { + const configData = config as ProxmoxGuestConfig; + const statusData = status as ProxmoxGuestStatus; + + // Extract network interfaces + const interfaces: Record = {}; + let hostname = configData.name || "unknown"; + + // Parse network configuration (net0, net1, etc.) + for (const key of Object.keys(configData)) { + if (key.startsWith("net")) { + interfaces[key] = configData[key]; + } + } + + // For LXC, hostname might be in config + if (guestType === "lxc" && configData.hostname) { + hostname = configData.hostname as string; + } + + // Build facts object + const facts: Facts = { + nodeId, + gatheredAt: new Date().toISOString(), + source: "proxmox", + facts: { + os: { + family: guestType === "lxc" ? "linux" : "unknown", + name: (configData.ostype as string) || "unknown", + release: { + full: "unknown", + major: "unknown", + }, + }, + processors: { + count: configData.cores || 1, + models: configData.cpu ? [configData.cpu as string] : [], + }, + memory: { + system: { + total: this.formatBytes(configData.memory * 1024 * 1024), + available: + statusData.status === "running" && statusData.mem !== undefined + ? 
this.formatBytes((configData.memory - statusData.mem / (1024 * 1024)) * 1024 * 1024) + : this.formatBytes(configData.memory * 1024 * 1024), + }, + }, + networking: { + hostname, + interfaces, + }, + categories: { + system: { + vmid: configData.vmid, + type: guestType, + status: statusData.status, + uptime: statusData.uptime, + }, + hardware: { + cores: configData.cores, + sockets: configData.sockets, + memory: configData.memory, + cpu: configData.cpu, + }, + network: { + interfaces, + }, + custom: { + bootdisk: configData.bootdisk, + scsihw: configData.scsihw, + }, + }, + }, + }; + + // Add current usage if guest is running + if (statusData.status === "running") { + facts.facts.categories!.system = { + ...facts.facts.categories!.system, + currentMemory: statusData.mem, + currentMemoryFormatted: this.formatBytes(statusData.mem || 0), + currentDisk: statusData.disk, + currentDiskFormatted: this.formatBytes(statusData.disk || 0), + networkIn: statusData.netin, + networkOut: statusData.netout, + diskRead: statusData.diskread, + diskWrite: statusData.diskwrite, + }; + } + + this.logger.debug("Transformed config and status to facts", { + component: "ProxmoxService", + operation: "transformToFacts", + metadata: { nodeId, guestType }, + }); + + return facts; + } + + /** + * Format bytes to human-readable string + * + * @param bytes - Number of bytes + * @returns Formatted string (e.g., "1.5 GB") + * @private + */ + private formatBytes(bytes: number): string { + if (bytes === 0) return "0 B"; + + const k = 1024; + const sizes = ["B", "KB", "MB", "GB", "TB"]; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + + return `${parseFloat((bytes / Math.pow(k, i)).toFixed(2))} ${sizes[i]}`; + } + + /** + * Get list of PVE nodes in the cluster + * + * Queries the Proxmox API for all physical nodes. + * Results are cached for 60 seconds. 
+ * + * @returns Array of node objects with name, status, and resource info + */ + async getNodes(): Promise<{ node: string; status: string; maxcpu?: number; maxmem?: number }[]> { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + const cacheKey = "pve:nodes"; + const cached = this.cache.get(cacheKey); + if (cached) { + return cached as { node: string; status: string; maxcpu?: number; maxmem?: number }[]; + } + + try { + const result = await this.client.get("/api2/json/nodes"); + if (!Array.isArray(result)) { + throw new Error("Unexpected response format from Proxmox API"); + } + + const nodes = result.map((n: Record) => ({ + node: n.node as string, + status: n.status as string, + maxcpu: n.maxcpu as number | undefined, + maxmem: n.maxmem as number | undefined, + })); + + this.cache.set(cacheKey, nodes, 60000); + return nodes; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error("Failed to fetch PVE nodes", { + component: "ProxmoxService", + operation: "getNodes", + metadata: { error: errorMessage }, + }, error instanceof Error ? error : undefined); + throw error; + } + } + + /** + * Get the next available VMID from Proxmox cluster + * + * Proxmox provides a cluster-wide endpoint that returns the next free VMID. + * + * @returns Next available VMID number + */ + async getNextVMID(): Promise { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + try { + const result = await this.client.get("/api2/json/cluster/nextid"); + const vmid = typeof result === "string" ? parseInt(result, 10) : result as number; + if (isNaN(vmid as number)) { + throw new Error("Unexpected response format for next VMID"); + } + return vmid as number; + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + this.logger.error("Failed to fetch next VMID", { + component: "ProxmoxService", + operation: "getNextVMID", + metadata: { error: errorMessage }, + }, error instanceof Error ? error : undefined); + throw error; + } + } + + /** + * Get ISO images available on a specific node's storage + * + * Queries the Proxmox API for ISO content on the given node. + * Results are cached for 120 seconds. + * + * @param node - PVE node name + * @param storage - Storage name (defaults to 'local') + * @returns Array of ISO image objects + */ + async getISOImages(node: string, storage = "local"): Promise<{ volid: string; format: string; size: number }[]> { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + const cacheKey = `iso:${node}:${storage}`; + const cached = this.cache.get(cacheKey); + if (cached) { + return cached as { volid: string; format: string; size: number }[]; + } + + try { + const result = await this.client.get( + `/api2/json/nodes/${node}/storage/${storage}/content?content=iso` + ); + if (!Array.isArray(result)) { + throw new Error("Unexpected response format from Proxmox API"); + } + + const isos = result.map((item: Record) => ({ + volid: item.volid as string, + format: item.format as string, + size: item.size as number, + })); + + this.cache.set(cacheKey, isos, 120000); + return isos; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error("Failed to fetch ISO images", { + component: "ProxmoxService", + operation: "getISOImages", + metadata: { node, storage, error: errorMessage }, + }, error instanceof Error ? error : undefined); + throw error; + } + } + + /** + * Get OS templates available on a specific node's storage + * + * Queries the Proxmox API for container templates on the given node. + * Results are cached for 120 seconds. 
+ * + * @param node - PVE node name + * @param storage - Storage name (defaults to 'local') + * @returns Array of template objects + */ + async getTemplates(node: string, storage = "local"): Promise<{ volid: string; format: string; size: number }[]> { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + const cacheKey = `templates:${node}:${storage}`; + const cached = this.cache.get(cacheKey); + if (cached) { + return cached as { volid: string; format: string; size: number }[]; + } + + try { + const result = await this.client.get( + `/api2/json/nodes/${node}/storage/${storage}/content?content=vztmpl` + ); + if (!Array.isArray(result)) { + throw new Error("Unexpected response format from Proxmox API"); + } + + const templates = result.map((item: Record) => ({ + volid: item.volid as string, + format: item.format as string, + size: item.size as number, + })); + + this.cache.set(cacheKey, templates, 120000); + return templates; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error("Failed to fetch OS templates", { + component: "ProxmoxService", + operation: "getTemplates", + metadata: { node, storage, error: errorMessage }, + }, error instanceof Error ? error : undefined); + throw error; + } + } + + /** + * Get available storages on a node, optionally filtered by content type + * + * Queries the Proxmox API for storages on the given node. + * Results are cached for 120 seconds. + * + * @param node - PVE node name + * @param contentType - Optional content filter (e.g. 'rootdir', 'images', 'vztmpl', 'iso') + * @returns Array of storage objects + */ + async getStorages(node: string, contentType?: string): Promise<{ storage: string; type: string; content: string; active: number; total?: number; used?: number; avail?: number }[]> { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + const cacheKey = `storages:${node}:${contentType ?? 
"all"}`; + const cached = this.cache.get(cacheKey); + if (cached) { + return cached as { storage: string; type: string; content: string; active: number; total?: number; used?: number; avail?: number }[]; + } + + try { + const query = contentType ? `?content=${contentType}` : ""; + const result = await this.client.get( + `/api2/json/nodes/${node}/storage${query}` + ); + if (!Array.isArray(result)) { + throw new Error("Unexpected response format from Proxmox API"); + } + + const storages = result.map((item: Record) => ({ + storage: item.storage as string, + type: item.type as string, + content: item.content as string, + active: item.active as number, + total: item.total as number | undefined, + used: item.used as number | undefined, + avail: item.avail as number | undefined, + })); + + this.cache.set(cacheKey, storages, 120000); + return storages; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error("Failed to fetch storages", { + component: "ProxmoxService", + operation: "getStorages", + metadata: { node, contentType, error: errorMessage }, + }, error instanceof Error ? error : undefined); + throw error; + } + } + + /** + * Get available network bridges/interfaces on a node + * + * Queries the Proxmox API for network devices on the given node, + * filtered to bridges only by default. + * Results are cached for 120 seconds. 
+ * + * @param node - PVE node name + * @param type - Optional type filter (defaults to 'bridge') + * @returns Array of network interface objects + */ + async getNetworkBridges(node: string, type = "bridge"): Promise<{ iface: string; type: string; active: number; address?: string; cidr?: string; bridge_ports?: string }[]> { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + const cacheKey = `networks:${node}:${type}`; + const cached = this.cache.get(cacheKey); + if (cached) { + return cached as { iface: string; type: string; active: number; address?: string; cidr?: string; bridge_ports?: string }[]; + } + + try { + const query = type ? `?type=${type}` : ""; + const result = await this.client.get( + `/api2/json/nodes/${node}/network${query}` + ); + if (!Array.isArray(result)) { + throw new Error("Unexpected response format from Proxmox API"); + } + + const networks = result.map((item: Record) => ({ + iface: item.iface as string, + type: item.type as string, + active: item.active as number, + address: item.address as string | undefined, + cidr: item.cidr as string | undefined, + bridge_ports: item.bridge_ports as string | undefined, + })); + + this.cache.set(cacheKey, networks, 120000); + return networks; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error("Failed to fetch network bridges", { + component: "ProxmoxService", + operation: "getNetworkBridges", + metadata: { node, type, error: errorMessage }, + }, error instanceof Error ? error : undefined); + throw error; + } + } + + /** + * Clear all cached data + * + * Useful for forcing fresh data retrieval or after provisioning operations. + */ + clearCache(): void { + this.cache.clear(); + this.logger.debug("Cache cleared", { + component: "ProxmoxService", + operation: "clearCache", + }); + } + + /** + * Execute an action on a guest + * + * Routes actions to appropriate handlers based on action type. 
+ * Supports lifecycle actions (start, stop, shutdown, reboot, suspend, resume) + * and provisioning actions (create_vm, create_lxc, destroy_vm, destroy_lxc). + * + * @param action - Action to execute + * @returns ExecutionResult with success/error details + * @throws Error if client is not initialized or action is invalid + */ + async executeAction(action: Action): Promise { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + this.logger.info("Executing action", { + component: "ProxmoxService", + operation: "executeAction", + metadata: { action: action.action, target: action.target }, + }); + + const complete = this.performanceMonitor.startTimer("proxmox:executeAction"); + + try { + let result: ExecutionResult; + + // Check if this is a provisioning action + const provisioningActions = ["create_vm", "create_lxc", "destroy_vm", "destroy_lxc"]; + if (provisioningActions.includes(action.action)) { + result = await this.executeProvisioningAction(action.action, action.parameters); + } else { + // Handle lifecycle actions + result = await this.executeLifecycleAction( + action.target as string, + action.action + ); + } + + complete({ success: result.status === "success" }); + return result; + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Failed to execute action", + { + component: "ProxmoxService", + operation: "executeAction", + metadata: { action: action.action, target: action.target, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + complete({ error: errorMessage }); + throw error; + } + } + + /** + * Execute a lifecycle action on a guest + * + * Handles start, stop, shutdown, reboot, suspend, and resume actions. + * Parses the target nodeId to extract node and VMID, determines guest type, + * calls the appropriate Proxmox API endpoint, and waits for task completion. 
+ * + * @param target - Target node ID in format proxmox:{node}:{vmid} + * @param action - Action name (start, stop, shutdown, reboot, suspend, resume) + * @returns ExecutionResult with success/error details + * @private + */ + private async executeLifecycleAction( + target: string, + action: string + ): Promise { + const startedAt = new Date().toISOString(); + + try { + // Parse target nodeId to extract node and VMID + const vmid = this.parseVMID(target); + const node = this.parseNodeName(target); + + this.logger.debug("Executing lifecycle action", { + component: "ProxmoxService", + operation: "executeLifecycleAction", + metadata: { node, vmid, action }, + }); + + // Determine guest type (qemu or lxc) + const guestType = await this.getGuestType(node, vmid); + + // Map action to API endpoint + let endpoint: string; + switch (action) { + case "start": + endpoint = guestType === "lxc" + ? `/api2/json/nodes/${node}/lxc/${vmid}/status/start` + : `/api2/json/nodes/${node}/qemu/${vmid}/status/start`; + break; + case "stop": + endpoint = guestType === "lxc" + ? `/api2/json/nodes/${node}/lxc/${vmid}/status/stop` + : `/api2/json/nodes/${node}/qemu/${vmid}/status/stop`; + break; + case "shutdown": + endpoint = guestType === "lxc" + ? `/api2/json/nodes/${node}/lxc/${vmid}/status/shutdown` + : `/api2/json/nodes/${node}/qemu/${vmid}/status/shutdown`; + break; + case "reboot": + endpoint = guestType === "lxc" + ? 
`/api2/json/nodes/${node}/lxc/${vmid}/status/reboot` + : `/api2/json/nodes/${node}/qemu/${vmid}/status/reboot`; + break; + case "suspend": + if (guestType === "lxc") { + throw new Error("Suspend action is not supported for LXC containers"); + } + endpoint = `/api2/json/nodes/${node}/qemu/${vmid}/status/suspend`; + break; + case "resume": + if (guestType === "lxc") { + throw new Error("Resume action is not supported for LXC containers"); + } + endpoint = `/api2/json/nodes/${node}/qemu/${vmid}/status/resume`; + break; + case "snapshot": + // Snapshot requires special handling with a name parameter + const snapshotName = `snapshot-${Date.now()}`; + endpoint = guestType === "lxc" + ? `/api2/json/nodes/${node}/lxc/${vmid}/snapshot` + : `/api2/json/nodes/${node}/qemu/${vmid}/snapshot`; + + // For snapshot, we need to POST with a snapname parameter + const taskId = await this.client!.post(endpoint, { snapname: snapshotName }); + + this.logger.debug("Snapshot task started", { + component: "ProxmoxService", + operation: "executeLifecycleAction", + metadata: { node, vmid, action, taskId, snapshotName }, + }); + + // Wait for task completion + await this.client!.waitForTask(node, taskId); + + const completedAt = new Date().toISOString(); + + this.logger.info("Snapshot created successfully", { + component: "ProxmoxService", + operation: "executeLifecycleAction", + metadata: { node, vmid, snapshotName }, + }); + + // Return ExecutionResult + return { + id: taskId, + type: "task", + targetNodes: [target], + action, + status: "success", + startedAt, + completedAt, + results: [ + { + nodeId: target, + status: "success", + output: { + stdout: `Snapshot ${snapshotName} created successfully`, + }, + duration: new Date(completedAt).getTime() - new Date(startedAt).getTime(), + }, + ], + }; + default: + throw new Error(`Unsupported action: ${action}`); + } + + // Execute the action + const taskId = await this.client!.post(endpoint, {}); + + this.logger.debug("Action task started", { + 
component: "ProxmoxService", + operation: "executeLifecycleAction", + metadata: { node, vmid, action, taskId }, + }); + + // Wait for task completion + await this.client!.waitForTask(node, taskId); + + const completedAt = new Date().toISOString(); + + this.logger.info("Lifecycle action completed successfully", { + component: "ProxmoxService", + operation: "executeLifecycleAction", + metadata: { node, vmid, action }, + }); + + // Return ExecutionResult + return { + id: taskId, + type: "task", + targetNodes: [target], + action, + status: "success", + startedAt, + completedAt, + results: [ + { + nodeId: target, + status: "success", + output: { + stdout: `Action ${action} completed successfully`, + }, + duration: new Date(completedAt).getTime() - new Date(startedAt).getTime(), + }, + ], + }; + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Lifecycle action failed", + { + component: "ProxmoxService", + operation: "executeLifecycleAction", + metadata: { target, action, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + // Return ExecutionResult with error + return { + id: `error-${Date.now()}`, + type: "task", + targetNodes: [target], + action, + status: "failed", + startedAt, + completedAt: new Date().toISOString(), + results: [ + { + nodeId: target, + status: "failed", + error: errorMessage, + duration: 0, + }, + ], + error: errorMessage, + }; + } + } + + /** + * List capabilities supported by this integration + * + * Returns all lifecycle actions that can be performed on VMs and containers. 
+ * + * @returns Array of Capability objects + */ + listCapabilities(): Capability[] { + return [ + { + name: "start", + description: "Start a VM or container", + parameters: [], + }, + { + name: "stop", + description: "Force stop a VM or container", + parameters: [], + }, + { + name: "shutdown", + description: "Gracefully shutdown a VM or container", + parameters: [], + }, + { + name: "reboot", + description: "Reboot a VM or container", + parameters: [], + }, + { + name: "suspend", + description: "Suspend a VM (not supported for LXC containers)", + parameters: [], + }, + { + name: "resume", + description: "Resume a suspended VM (not supported for LXC containers)", + parameters: [], + }, + { + name: "snapshot", + description: "Create a snapshot of the VM or container", + parameters: [], + }, + ]; + } + + /** + * Check if a guest exists on a node + * + * Queries the Proxmox API to determine if a guest with the given VMID exists. + * + * @param node - Node name + * @param vmid - VM/Container ID + * @returns True if guest exists, false otherwise + * @private + */ + private async guestExists(node: string, vmid: number): Promise { + try { + // Try to get guest status - if it exists, this will succeed + await this.getGuestType(node, vmid); + return true; + } catch (error) { + // If guest doesn't exist, getGuestType will throw + return false; + } + } + + /** + * Create a new VM + * + * Creates a new virtual machine on the specified node with the given parameters. + * Validates VMID uniqueness before creation, waits for task completion, + * and clears inventory/groups cache after successful creation. 
+ * + * @param params - VM creation parameters + * @returns ExecutionResult with success/error details + * @throws Error if client is not initialized + */ + async createVM(params: VMCreateParams): Promise { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + this.logger.info("Creating VM", { + component: "ProxmoxService", + operation: "createVM", + metadata: { vmid: params.vmid, node: params.node, name: params.name }, + }); + + const complete = this.performanceMonitor.startTimer("proxmox:createVM"); + const startedAt = new Date().toISOString(); + + try { + // Validate VMID is unique + const exists = await this.guestExists(params.node, params.vmid); + if (exists) { + const errorMessage = `VM with VMID ${params.vmid} already exists on node ${params.node}`; + this.logger.warn(errorMessage, { + component: "ProxmoxService", + operation: "createVM", + metadata: { vmid: params.vmid, node: params.node }, + }); + + complete({ error: errorMessage }); + return { + id: `error-${Date.now()}`, + type: "task", + targetNodes: [`proxmox:${params.node}:${params.vmid}`], + action: "create_vm", + status: "failed", + startedAt, + completedAt: new Date().toISOString(), + error: errorMessage, + results: [], + }; + } + + // Call Proxmox API to create VM + const endpoint = `/api2/json/nodes/${params.node}/qemu`; + // Strip 'node' from the payload — it's already in the URL path + // and Proxmox rejects unknown parameters + const { node: _node, ...apiPayload } = params; + const taskId = await this.client.post(endpoint, apiPayload); + + this.logger.debug("VM creation task started", { + component: "ProxmoxService", + operation: "createVM", + metadata: { vmid: params.vmid, node: params.node, taskId }, + }); + + // Wait for task completion + await this.client.waitForTask(params.node, taskId); + + const completedAt = new Date().toISOString(); + + // Clear inventory and groups cache + this.cache.delete("inventory:all"); + this.cache.delete("groups:all"); + + 
this.logger.info("VM created successfully", { + component: "ProxmoxService", + operation: "createVM", + metadata: { vmid: params.vmid, node: params.node }, + }); + + complete({ success: true, vmid: params.vmid }); + + return { + id: taskId, + type: "task", + targetNodes: [`proxmox:${params.node}:${params.vmid}`], + action: "create_vm", + status: "success", + startedAt, + completedAt, + results: [ + { + nodeId: `proxmox:${params.node}:${params.vmid}`, + status: "success", + output: { + stdout: `VM ${params.vmid} created successfully`, + }, + duration: new Date(completedAt).getTime() - new Date(startedAt).getTime(), + }, + ], + }; + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Failed to create VM", + { + component: "ProxmoxService", + operation: "createVM", + metadata: { vmid: params.vmid, node: params.node, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + complete({ error: errorMessage }); + + return { + id: `error-${Date.now()}`, + type: "task", + targetNodes: [`proxmox:${params.node}:${params.vmid}`], + action: "create_vm", + status: "failed", + startedAt, + completedAt: new Date().toISOString(), + error: errorMessage, + results: [], + }; + } + } + + /** + * Create a new LXC container + * + * Creates a new LXC container on the specified node with the given parameters. + * Validates VMID uniqueness before creation, waits for task completion, + * and clears inventory/groups cache after successful creation. 
+ * + * @param params - LXC creation parameters + * @returns ExecutionResult with success/error details + * @throws Error if client is not initialized + */ + async createLXC(params: LXCCreateParams): Promise { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + this.logger.info("Creating LXC container", { + component: "ProxmoxService", + operation: "createLXC", + metadata: { vmid: params.vmid, node: params.node, hostname: params.hostname }, + }); + + const complete = this.performanceMonitor.startTimer("proxmox:createLXC"); + const startedAt = new Date().toISOString(); + + try { + // Validate VMID is unique + const exists = await this.guestExists(params.node, params.vmid); + if (exists) { + const errorMessage = `Container with VMID ${params.vmid} already exists on node ${params.node}`; + this.logger.warn(errorMessage, { + component: "ProxmoxService", + operation: "createLXC", + metadata: { vmid: params.vmid, node: params.node }, + }); + + complete({ error: errorMessage }); + return { + id: `error-${Date.now()}`, + type: "task", + targetNodes: [`proxmox:${params.node}:${params.vmid}`], + action: "create_lxc", + status: "failed", + startedAt, + completedAt: new Date().toISOString(), + error: errorMessage, + results: [], + }; + } + + // Call Proxmox API to create LXC + const endpoint = `/api2/json/nodes/${params.node}/lxc`; + // Strip 'node' from the payload — it's already in the URL path + // and Proxmox rejects unknown parameters + const { node: _node, ...apiPayload } = params; + const taskId = await this.client.post(endpoint, apiPayload); + + this.logger.debug("LXC creation task started", { + component: "ProxmoxService", + operation: "createLXC", + metadata: { vmid: params.vmid, node: params.node, taskId }, + }); + + // Wait for task completion + await this.client.waitForTask(params.node, taskId); + + const completedAt = new Date().toISOString(); + + // Clear inventory and groups cache + this.cache.delete("inventory:all"); + 
this.cache.delete("groups:all"); + + this.logger.info("LXC container created successfully", { + component: "ProxmoxService", + operation: "createLXC", + metadata: { vmid: params.vmid, node: params.node }, + }); + + complete({ success: true, vmid: params.vmid }); + + return { + id: taskId, + type: "task", + targetNodes: [`proxmox:${params.node}:${params.vmid}`], + action: "create_lxc", + status: "success", + startedAt, + completedAt, + results: [ + { + nodeId: `proxmox:${params.node}:${params.vmid}`, + status: "success", + output: { + stdout: `Container ${params.vmid} created successfully`, + }, + duration: new Date(completedAt).getTime() - new Date(startedAt).getTime(), + }, + ], + }; + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Failed to create LXC container", + { + component: "ProxmoxService", + operation: "createLXC", + metadata: { vmid: params.vmid, node: params.node, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + complete({ error: errorMessage }); + + return { + id: `error-${Date.now()}`, + type: "task", + targetNodes: [`proxmox:${params.node}:${params.vmid}`], + action: "create_lxc", + status: "failed", + startedAt, + completedAt: new Date().toISOString(), + error: errorMessage, + results: [], + }; + } + } + + /** + * Destroy a guest (VM or LXC container) + * + * Destroys a guest by first stopping it if running, then deleting it. + * Clears all related caches (inventory, groups, facts) after successful deletion. 
+ * + * @param node - Node name + * @param vmid - VM/Container ID + * @returns ExecutionResult with success/error details + * @throws Error if client is not initialized + */ + async destroyGuest(node: string, vmid: number): Promise { + if (!this.client) { + throw new Error("ProxmoxClient not initialized"); + } + + this.logger.info("Destroying guest", { + component: "ProxmoxService", + operation: "destroyGuest", + metadata: { node, vmid }, + }); + + const complete = this.performanceMonitor.startTimer("proxmox:destroyGuest"); + const startedAt = new Date().toISOString(); + const nodeId = `proxmox:${node}:${vmid}`; + + try { + // Check if guest exists + const exists = await this.guestExists(node, vmid); + if (!exists) { + const errorMessage = `Guest ${vmid} not found on node ${node}`; + this.logger.warn(errorMessage, { + component: "ProxmoxService", + operation: "destroyGuest", + metadata: { node, vmid }, + }); + + complete({ error: errorMessage }); + return { + id: `error-${Date.now()}`, + type: "task", + targetNodes: [nodeId], + action: "destroy_guest", + status: "failed", + startedAt, + completedAt: new Date().toISOString(), + error: errorMessage, + results: [], + }; + } + + // Determine guest type + const guestType = await this.getGuestType(node, vmid); + + // Check if guest is running and stop it first + const statusEndpoint = guestType === "lxc" + ? `/api2/json/nodes/${node}/lxc/${vmid}/status/current` + : `/api2/json/nodes/${node}/qemu/${vmid}/status/current`; + + const status = await this.client.get(statusEndpoint) as ProxmoxGuestStatus; + + if (status.status === "running") { + this.logger.debug("Stopping guest before deletion", { + component: "ProxmoxService", + operation: "destroyGuest", + metadata: { node, vmid, guestType }, + }); + + const stopEndpoint = guestType === "lxc" + ? 
`/api2/json/nodes/${node}/lxc/${vmid}/status/stop` + : `/api2/json/nodes/${node}/qemu/${vmid}/status/stop`; + + const stopTaskId = await this.client.post(stopEndpoint, {}); + await this.client.waitForTask(node, stopTaskId); + + this.logger.debug("Guest stopped successfully", { + component: "ProxmoxService", + operation: "destroyGuest", + metadata: { node, vmid }, + }); + } + + // Delete guest + const deleteEndpoint = guestType === "lxc" + ? `/api2/json/nodes/${node}/lxc/${vmid}` + : `/api2/json/nodes/${node}/qemu/${vmid}`; + + const deleteTaskId = await this.client.delete(deleteEndpoint); + await this.client.waitForTask(node, deleteTaskId); + + const completedAt = new Date().toISOString(); + + // Clear all related caches + this.cache.delete("inventory:all"); + this.cache.delete("groups:all"); + this.cache.delete(`facts:${nodeId}`); + + this.logger.info("Guest destroyed successfully", { + component: "ProxmoxService", + operation: "destroyGuest", + metadata: { node, vmid }, + }); + + complete({ success: true }); + + return { + id: deleteTaskId, + type: "task", + targetNodes: [nodeId], + action: "destroy_guest", + status: "success", + startedAt, + completedAt, + results: [ + { + nodeId, + status: "success", + output: { + stdout: `Guest ${vmid} destroyed successfully`, + }, + duration: new Date(completedAt).getTime() - new Date(startedAt).getTime(), + }, + ], + }; + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + this.logger.error( + "Failed to destroy guest", + { + component: "ProxmoxService", + operation: "destroyGuest", + metadata: { node, vmid, error: errorMessage }, + }, + error instanceof Error ? 
error : undefined + ); + + complete({ error: errorMessage }); + + return { + id: `error-${Date.now()}`, + type: "task", + targetNodes: [nodeId], + action: "destroy_guest", + status: "failed", + startedAt, + completedAt: new Date().toISOString(), + error: errorMessage, + results: [], + }; + } + } + + /** + * Execute a provisioning action + * + * Routes provisioning actions (create_vm, create_lxc, destroy_vm, destroy_lxc) + * to the appropriate handler methods. + * + * @param action - Action name + * @param params - Action parameters + * @returns ExecutionResult with success/error details + * @private + */ + private async executeProvisioningAction( + action: string, + params: unknown + ): Promise { + switch (action) { + case "create_vm": + return await this.createVM(params as VMCreateParams); + case "create_lxc": + return await this.createLXC(params as LXCCreateParams); + case "destroy_vm": + case "destroy_lxc": { + const destroyParams = params as { node: string; vmid: number }; + if (!destroyParams.node || !destroyParams.vmid) { + throw new Error("destroy action requires node and vmid parameters"); + } + return await this.destroyGuest(destroyParams.node, destroyParams.vmid); + } + default: + throw new Error(`Unsupported provisioning action: ${action}`); + } + } + + /** + * List provisioning capabilities + * + * Returns all provisioning capabilities supported by this integration, + * including VM and LXC creation and destruction. 
+ * + * @returns Array of ProvisioningCapability objects + */ + listProvisioningCapabilities(): ProvisioningCapability[] { + return [ + { + name: "create_vm", + description: "Create a new virtual machine", + operation: "create", + parameters: [ + { name: "vmid", type: "number", required: true }, + { name: "name", type: "string", required: true }, + { name: "node", type: "string", required: true }, + { name: "cores", type: "number", required: false, default: 1 }, + { name: "memory", type: "number", required: false, default: 512 }, + { name: "disk", type: "string", required: false }, + { name: "network", type: "object", required: false }, + ], + }, + { + name: "create_lxc", + description: "Create a new LXC container", + operation: "create", + parameters: [ + { name: "vmid", type: "number", required: true }, + { name: "hostname", type: "string", required: true }, + { name: "node", type: "string", required: true }, + { name: "ostemplate", type: "string", required: true }, + { name: "cores", type: "number", required: false, default: 1 }, + { name: "memory", type: "number", required: false, default: 512 }, + { name: "rootfs", type: "string", required: false }, + { name: "network", type: "object", required: false }, + ], + }, + { + name: "destroy_vm", + description: "Destroy a virtual machine", + operation: "destroy", + parameters: [ + { name: "vmid", type: "number", required: true }, + { name: "node", type: "string", required: true }, + ], + }, + { + name: "destroy_lxc", + description: "Destroy an LXC container", + operation: "destroy", + parameters: [ + { name: "vmid", type: "number", required: true }, + { name: "node", type: "string", required: true }, + ], + }, + ]; + } + + +} diff --git a/backend/src/integrations/proxmox/__tests__/ProxmoxIntegration.test.ts b/backend/src/integrations/proxmox/__tests__/ProxmoxIntegration.test.ts new file mode 100644 index 00000000..2fa36542 --- /dev/null +++ b/backend/src/integrations/proxmox/__tests__/ProxmoxIntegration.test.ts @@ 
-0,0 +1,601 @@ +/** + * ProxmoxIntegration Unit Tests + * + * Tests for the ProxmoxIntegration plugin class. + */ + +import { describe, it, expect, beforeEach, vi } from "vitest"; +import { ProxmoxIntegration } from "../ProxmoxIntegration"; +import type { IntegrationConfig } from "../../types"; +import type { LoggerService } from "../../../services/LoggerService"; +import type { PerformanceMonitorService } from "../../../services/PerformanceMonitorService"; + +// Mock ProxmoxService +const mockService = { + initialize: vi.fn().mockResolvedValue(undefined), + healthCheck: vi.fn(), + getInventory: vi.fn(), + getGroups: vi.fn(), + getNodeFacts: vi.fn(), + executeAction: vi.fn(), + listCapabilities: vi.fn(), + listProvisioningCapabilities: vi.fn(), +}; + +vi.mock("../ProxmoxService", () => ({ + ProxmoxService: class { + initialize = mockService.initialize; + healthCheck = mockService.healthCheck; + getInventory = mockService.getInventory; + getGroups = mockService.getGroups; + getNodeFacts = mockService.getNodeFacts; + executeAction = mockService.executeAction; + listCapabilities = mockService.listCapabilities; + listProvisioningCapabilities = mockService.listProvisioningCapabilities; + }, +})); + +describe("ProxmoxIntegration", () => { + let plugin: ProxmoxIntegration; + let mockLogger: LoggerService; + let mockPerfMonitor: PerformanceMonitorService; + + beforeEach(() => { + // Reset mocks + vi.clearAllMocks(); + + // Create mock logger + mockLogger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + } as unknown as LoggerService; + + // Create mock performance monitor + const mockComplete = vi.fn(); + mockPerfMonitor = { + startTimer: vi.fn(() => mockComplete), + } as unknown as PerformanceMonitorService; + + // Create plugin instance + plugin = new ProxmoxIntegration(mockLogger, mockPerfMonitor); + }); + + describe("initialization", () => { + it("should initialize with valid configuration", async () => { + const config: IntegrationConfig = { 
+ enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + username: "root", + password: "password", // pragma: allowlist secret + realm: "pam", + }, + }; + + await plugin.initialize(config); + + expect(plugin.isInitialized()).toBe(true); + expect(mockService.initialize).toHaveBeenCalledOnce(); + }); + + it("should throw error for missing host", async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + port: 8006, + username: "root", + password: "password", // pragma: allowlist secret + realm: "pam", + }, + }; + + await expect(plugin.initialize(config)).rejects.toThrow( + "Proxmox configuration must include a valid host" + ); + }); + + it("should throw error for invalid port", async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 70000, + username: "root", + password: "password", // pragma: allowlist secret + realm: "pam", + }, + }; + + await expect(plugin.initialize(config)).rejects.toThrow( + "Proxmox port must be between 1 and 65535" + ); + }); + + it("should throw error for missing authentication", async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + username: "root", + realm: "pam", + }, + }; + + await expect(plugin.initialize(config)).rejects.toThrow( + "Proxmox configuration must include either token or password authentication" + ); + }); + + it("should throw error for missing realm with password auth", async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + username: "root", + password: "password", // pragma: allowlist secret + }, + }; + + await expect(plugin.initialize(config)).rejects.toThrow( + "Proxmox password authentication 
requires a realm" + ); + }); + + it("should log warning when SSL verification is disabled", async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + username: "root", + password: "password", // pragma: allowlist secret + realm: "pam", + ssl: { + rejectUnauthorized: false, + }, + }, + }; + + await plugin.initialize(config); + + expect(mockLogger.warn).toHaveBeenCalledWith( + "TLS certificate verification is disabled - this is insecure", + expect.objectContaining({ + component: "ProxmoxIntegration", + operation: "validateProxmoxConfig", + }) + ); + }); + + it("should accept token authentication", async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + token: "user@realm!tokenid=uuid", + }, + }; + + await plugin.initialize(config); + + expect(plugin.isInitialized()).toBe(true); + expect(mockService.initialize).toHaveBeenCalledOnce(); + }); + }); + + describe("health check", () => { + beforeEach(async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + token: "user@realm!tokenid=uuid", + }, + }; + await plugin.initialize(config); + }); + + it("should delegate health check to service", async () => { + mockService.healthCheck.mockResolvedValue({ + healthy: true, + message: "Proxmox API is reachable", + }); + + const health = await plugin.healthCheck(); + + expect(health.healthy).toBe(true); + expect(mockService.healthCheck).toHaveBeenCalledOnce(); + }); + }); + + describe("InformationSourcePlugin methods", () => { + beforeEach(async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + token: "user@realm!tokenid=uuid", + }, + }; + await 
plugin.initialize(config); + }); + + it("should delegate getInventory to service", async () => { + const mockNodes = [ + { + id: "proxmox:pve1:100", + name: "test-vm", + uri: "proxmox://pve1/100", + transport: "ssh" as const, + config: {}, + source: "proxmox", + }, + ]; + mockService.getInventory.mockResolvedValue(mockNodes); + + const inventory = await plugin.getInventory(); + + expect(inventory).toEqual(mockNodes); + expect(mockService.getInventory).toHaveBeenCalledOnce(); + }); + + it("should delegate getGroups to service", async () => { + const mockGroups = [ + { + id: "proxmox:node:pve1", + name: "Proxmox Node: pve1", + source: "proxmox", + sources: ["proxmox"], + linked: false, + nodes: ["proxmox:pve1:100"], + }, + ]; + mockService.getGroups.mockResolvedValue(mockGroups); + + const groups = await plugin.getGroups(); + + expect(groups).toEqual(mockGroups); + expect(mockService.getGroups).toHaveBeenCalledOnce(); + }); + + it("should delegate getNodeFacts to service", async () => { + const mockFacts = { + nodeId: "proxmox:pve1:100", + gatheredAt: "2024-01-01T00:00:00Z", + source: "proxmox", + facts: { + os: { family: "linux", name: "ubuntu", release: { full: "22.04", major: "22" } }, + processors: { count: 2, models: [] }, + memory: { system: { total: "2 GB", available: "1 GB" } }, + networking: { hostname: "test-vm", interfaces: {} }, + }, + }; + mockService.getNodeFacts.mockResolvedValue(mockFacts); + + const facts = await plugin.getNodeFacts("proxmox:pve1:100"); + + expect(facts).toEqual(mockFacts); + expect(mockService.getNodeFacts).toHaveBeenCalledWith("proxmox:pve1:100"); + }); + + it("should return null for getNodeData", async () => { + const data = await plugin.getNodeData("proxmox:pve1:100", "reports"); + + expect(data).toBeNull(); + }); + }); + + describe("ExecutionToolPlugin methods", () => { + beforeEach(async () => { + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + 
port: 8006,
+          token: "user@realm!tokenid=uuid",
+        },
+      };
+      await plugin.initialize(config);
+    });
+
+    it("should delegate executeAction to service", async () => {
+      const mockResult = {
+        id: "task-123",
+        type: "task" as const,
+        targetNodes: ["proxmox:pve1:100"],
+        action: "start",
+        status: "success" as const,
+        startedAt: "2024-01-01T00:00:00Z",
+        completedAt: "2024-01-01T00:00:05Z",
+        results: [],
+      };
+      mockService.executeAction.mockResolvedValue(mockResult);
+
+      const action = {
+        type: "task" as const,
+        target: "proxmox:pve1:100",
+        action: "start",
+      };
+
+      const result = await plugin.executeAction(action);
+
+      expect(result).toEqual(mockResult);
+      expect(mockService.executeAction).toHaveBeenCalledWith(action);
+    });
+
+    it("should delegate listCapabilities to service", () => {
+      const mockCapabilities = [
+        { name: "start", description: "Start a VM or container", parameters: [] },
+        { name: "stop", description: "Force stop a VM or container", parameters: [] },
+      ];
+      mockService.listCapabilities.mockReturnValue(mockCapabilities);
+
+      const capabilities = plugin.listCapabilities();
+
+      expect(capabilities).toEqual(mockCapabilities);
+      expect(mockService.listCapabilities).toHaveBeenCalledOnce();
+    });
+
+    it("should delegate listProvisioningCapabilities to service", () => {
+      const mockCapabilities = [
+        {
+          name: "create_vm",
+          description: "Create a new virtual machine",
+          operation: "create" as const,
+          parameters: [],
+        },
+      ];
+      mockService.listProvisioningCapabilities.mockReturnValue(mockCapabilities);
+
+      const capabilities = plugin.listProvisioningCapabilities();
+
+      expect(capabilities).toEqual(mockCapabilities);
+      expect(mockService.listProvisioningCapabilities).toHaveBeenCalledOnce();
+    });
+  });
+
+  describe("journal integration", () => {
+    let mockJournalService: { recordEvent: ReturnType<typeof vi.fn> };
+
+    beforeEach(async () => {
+      mockJournalService = {
+        recordEvent: vi.fn().mockResolvedValue("entry-id"),
+      };
+
+      const config: IntegrationConfig = {
+ enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + token: "user@realm!tokenid=uuid", + }, + }; + await plugin.initialize(config); + plugin.setJournalService(mockJournalService as never); + }); + + it("should record journal entry on successful action", async () => { + const mockResult = { + id: "task-123", + type: "task" as const, + targetNodes: ["proxmox:pve1:100"], + action: "start", + status: "success" as const, + startedAt: "2024-01-01T00:00:00Z", + completedAt: "2024-01-01T00:00:05Z", + results: [], + }; + mockService.executeAction.mockResolvedValue(mockResult); + + await plugin.executeAction({ + type: "task", + target: "proxmox:pve1:100", + action: "start", + }); + + expect(mockJournalService.recordEvent).toHaveBeenCalledOnce(); + expect(mockJournalService.recordEvent).toHaveBeenCalledWith( + expect.objectContaining({ + nodeId: "proxmox:pve1:100", + eventType: "start", + source: "proxmox", + action: "start", + }) + ); + }); + + it("should record journal entry on failed action result", async () => { + const mockResult = { + id: "error-123", + type: "task" as const, + targetNodes: ["proxmox:pve1:100"], + action: "stop", + status: "failed" as const, + startedAt: "2024-01-01T00:00:00Z", + completedAt: "2024-01-01T00:00:01Z", + results: [], + error: "Guest not found", + }; + mockService.executeAction.mockResolvedValue(mockResult); + + await plugin.executeAction({ + type: "task", + target: "proxmox:pve1:100", + action: "stop", + }); + + expect(mockJournalService.recordEvent).toHaveBeenCalledOnce(); + expect(mockJournalService.recordEvent).toHaveBeenCalledWith( + expect.objectContaining({ + source: "proxmox", + action: "stop", + summary: expect.stringContaining("failed"), + }) + ); + }); + + it("should record journal entry when service throws", async () => { + mockService.executeAction.mockRejectedValue(new Error("API unreachable")); + + await expect( + plugin.executeAction({ + type: "task", + target: 
"proxmox:pve1:100", + action: "reboot", + }) + ).rejects.toThrow("API unreachable"); + + expect(mockJournalService.recordEvent).toHaveBeenCalledOnce(); + expect(mockJournalService.recordEvent).toHaveBeenCalledWith( + expect.objectContaining({ + source: "proxmox", + eventType: "reboot", + summary: expect.stringContaining("API unreachable"), + }) + ); + }); + + it("should map provisioning actions to correct event types", async () => { + const mockResult = { + id: "task-456", + type: "task" as const, + targetNodes: ["proxmox:pve1:101"], + action: "create_vm", + status: "success" as const, + startedAt: "2024-01-01T00:00:00Z", + completedAt: "2024-01-01T00:00:10Z", + results: [], + }; + mockService.executeAction.mockResolvedValue(mockResult); + + await plugin.executeAction({ + type: "task", + target: "proxmox:pve1:101", + action: "create_vm", + }); + + expect(mockJournalService.recordEvent).toHaveBeenCalledWith( + expect.objectContaining({ + eventType: "provision", + source: "proxmox", + }) + ); + }); + + it("should not fail if journal recording throws", async () => { + mockJournalService.recordEvent.mockRejectedValue(new Error("DB error")); + const mockResult = { + id: "task-789", + type: "task" as const, + targetNodes: ["proxmox:pve1:100"], + action: "start", + status: "success" as const, + startedAt: "2024-01-01T00:00:00Z", + completedAt: "2024-01-01T00:00:05Z", + results: [], + }; + mockService.executeAction.mockResolvedValue(mockResult); + + const result = await plugin.executeAction({ + type: "task", + target: "proxmox:pve1:100", + action: "start", + }); + + expect(result.status).toBe("success"); + }); + + it("should work without journal service set", async () => { + // Create a fresh plugin without journal service + const freshPlugin = new ProxmoxIntegration(mockLogger, mockPerfMonitor); + const config: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: { + host: "proxmox.example.com", + port: 8006, + token: 
"user@realm!tokenid=uuid", + }, + }; + await freshPlugin.initialize(config); + + const mockResult = { + id: "task-000", + type: "task" as const, + targetNodes: ["proxmox:pve1:100"], + action: "start", + status: "success" as const, + startedAt: "2024-01-01T00:00:00Z", + completedAt: "2024-01-01T00:00:05Z", + results: [], + }; + mockService.executeAction.mockResolvedValue(mockResult); + + const result = await freshPlugin.executeAction({ + type: "task", + target: "proxmox:pve1:100", + action: "start", + }); + + expect(result.status).toBe("success"); + }); + }); + + describe("error handling", () => { + it("should throw error when calling methods before initialization", async () => { + await expect(plugin.getInventory()).rejects.toThrow( + "Proxmox integration is not initialized" + ); + await expect(plugin.getGroups()).rejects.toThrow( + "Proxmox integration is not initialized" + ); + await expect(plugin.getNodeFacts("proxmox:pve1:100")).rejects.toThrow( + "Proxmox integration is not initialized" + ); + await expect( + plugin.executeAction({ + type: "task", + target: "proxmox:pve1:100", + action: "start", + }) + ).rejects.toThrow("Proxmox integration is not initialized"); + expect(() => plugin.listCapabilities()).toThrow( + "Proxmox integration is not initialized" + ); + expect(() => plugin.listProvisioningCapabilities()).toThrow( + "Proxmox integration is not initialized" + ); + }); + }); +}); diff --git a/backend/src/integrations/proxmox/__tests__/ProxmoxService.test.ts b/backend/src/integrations/proxmox/__tests__/ProxmoxService.test.ts new file mode 100644 index 00000000..66f73dde --- /dev/null +++ b/backend/src/integrations/proxmox/__tests__/ProxmoxService.test.ts @@ -0,0 +1,1277 @@ +/** + * ProxmoxService Unit Tests + * + * Tests for the ProxmoxService business logic layer. 
+ */ + +import { describe, it, expect, beforeEach, vi } from "vitest"; +import { ProxmoxService } from "../ProxmoxService"; +import type { ProxmoxConfig, ProxmoxGuest } from "../types"; +import type { LoggerService } from "../../../services/LoggerService"; +import type { PerformanceMonitorService } from "../../../services/PerformanceMonitorService"; + +// Create mock client +const mockClient = { + authenticate: vi.fn().mockResolvedValue(undefined), + get: vi.fn(), + post: vi.fn(), + delete: vi.fn(), + waitForTask: vi.fn(), +}; + +// Mock ProxmoxClient module +vi.mock("../ProxmoxClient", () => ({ + ProxmoxClient: class { + authenticate = mockClient.authenticate; + get = mockClient.get; + post = mockClient.post; + delete = mockClient.delete; + waitForTask = mockClient.waitForTask; + }, +})); + +describe("ProxmoxService", () => { + let service: ProxmoxService; + let mockLogger: LoggerService; + let mockPerfMonitor: PerformanceMonitorService; + let mockConfig: ProxmoxConfig; + + beforeEach(() => { + // Reset mocks + vi.clearAllMocks(); + + // Create mock logger + mockLogger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + } as unknown as LoggerService; + + // Create mock performance monitor + const mockComplete = vi.fn(); + mockPerfMonitor = { + startTimer: vi.fn(() => mockComplete), + } as unknown as PerformanceMonitorService; + + // Create mock config + mockConfig = { + host: "proxmox.example.com", + port: 8006, + username: "root", + password: "password", // pragma: allowlist secret + realm: "pam", + }; + + // Create service instance + service = new ProxmoxService(mockConfig, mockLogger, mockPerfMonitor); + }); + + describe("getInventory", () => { + it("should fetch and transform inventory from Proxmox API", async () => { + // Mock API response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "test-vm-1", + node: "pve1", + type: "qemu", + status: "running", + maxmem: 2147483648, + cpus: 2, + uptime: 3600, + }, + { + vmid: 
101, + name: "test-container-1", + node: "pve1", + type: "lxc", + status: "stopped", + maxmem: 536870912, + cpus: 1, + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // Call getInventory + const nodes = await service.getInventory(); + + // Verify API call + expect(mockClient.get).toHaveBeenCalledWith( + "/api2/json/cluster/resources?type=vm" + ); + + // Verify results + expect(nodes).toHaveLength(2); + + // Verify first node (VM) + expect(nodes[0]).toMatchObject({ + id: "proxmox:pve1:100", + name: "test-vm-1", + uri: "proxmox://pve1/100", + transport: "ssh", + source: "proxmox", + status: "running", + }); + expect(nodes[0].metadata).toMatchObject({ + vmid: 100, + node: "pve1", + type: "qemu", + status: "running", + maxmem: 2147483648, + cpus: 2, + uptime: 3600, + }); + + // Verify second node (LXC) + expect(nodes[1]).toMatchObject({ + id: "proxmox:pve1:101", + name: "test-container-1", + uri: "proxmox://pve1/101", + transport: "ssh", + source: "proxmox", + status: "stopped", + }); + expect(nodes[1].metadata).toMatchObject({ + vmid: 101, + node: "pve1", + type: "lxc", + status: "stopped", + maxmem: 536870912, + cpus: 1, + }); + + // Verify performance monitoring + expect(mockPerfMonitor.startTimer).toHaveBeenCalledWith( + "proxmox:getInventory" + ); + }); + + it("should return cached inventory on subsequent calls within TTL", async () => { + // Mock API response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "test-vm-1", + node: "pve1", + type: "qemu", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // First call - should hit API + const nodes1 = await service.getInventory(); + expect(mockClient.get).toHaveBeenCalledTimes(1); + expect(nodes1).toHaveLength(1); + + // Second call - should use cache + const nodes2 = await service.getInventory(); + 
expect(mockClient.get).toHaveBeenCalledTimes(1); // Still 1, not called again + expect(nodes2).toHaveLength(1); + expect(nodes2).toEqual(nodes1); + }); + + it("should throw error if client is not initialized", async () => { + // Don't initialize service + await expect(service.getInventory()).rejects.toThrow( + "ProxmoxClient not initialized" + ); + }); + + it("should throw error if API returns non-array response", async () => { + // Mock invalid API response + mockClient.get.mockResolvedValue({ invalid: "response" }); + + // Initialize service + await service.initialize(); + + // Call should throw + await expect(service.getInventory()).rejects.toThrow( + "Unexpected response format from Proxmox API" + ); + }); + + it("should handle empty inventory", async () => { + // Mock empty API response + mockClient.get.mockResolvedValue([]); + + // Initialize service + await service.initialize(); + + // Call getInventory + const nodes = await service.getInventory(); + + // Verify empty result + expect(nodes).toHaveLength(0); + expect(nodes).toEqual([]); + }); + + it("should omit optional fields when not present", async () => { + // Mock API response with minimal fields + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "minimal-vm", + node: "pve1", + type: "qemu", + status: "stopped", + // No optional fields + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // Call getInventory + const nodes = await service.getInventory(); + + // Verify node has only required fields in metadata + expect(nodes[0].metadata).toMatchObject({ + vmid: 100, + node: "pve1", + type: "qemu", + status: "stopped", + }); + + // Verify optional fields are not present + expect(nodes[0].metadata).not.toHaveProperty("maxmem"); + expect(nodes[0].metadata).not.toHaveProperty("cpus"); + expect(nodes[0].metadata).not.toHaveProperty("uptime"); + }); + }); + + describe("getGroups", () => { + it("should create groups by node, status, 
and type", async () => { + // Mock API response with diverse guests + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "vm-1", + node: "pve1", + type: "qemu", + status: "running", + }, + { + vmid: 101, + name: "vm-2", + node: "pve1", + type: "qemu", + status: "stopped", + }, + { + vmid: 200, + name: "container-1", + node: "pve2", + type: "lxc", + status: "running", + }, + { + vmid: 201, + name: "container-2", + node: "pve2", + type: "lxc", + status: "paused", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // Call getGroups + const groups = await service.getGroups(); + + // Verify we have groups for nodes, statuses, and types + // 2 nodes (pve1, pve2) + 3 statuses (running, stopped, paused) + 2 types (qemu, lxc) = 7 groups + expect(groups).toHaveLength(7); + + // Verify node groups + const pve1Group = groups.find((g) => g.id === "proxmox:node:pve1"); + expect(pve1Group).toBeDefined(); + expect(pve1Group?.name).toBe("Proxmox Node: pve1"); + expect(pve1Group?.nodes).toHaveLength(2); + expect(pve1Group?.nodes).toContain("proxmox:pve1:100"); + expect(pve1Group?.nodes).toContain("proxmox:pve1:101"); + + const pve2Group = groups.find((g) => g.id === "proxmox:node:pve2"); + expect(pve2Group).toBeDefined(); + expect(pve2Group?.name).toBe("Proxmox Node: pve2"); + expect(pve2Group?.nodes).toHaveLength(2); + + // Verify status groups + const runningGroup = groups.find((g) => g.id === "proxmox:status:running"); + expect(runningGroup).toBeDefined(); + expect(runningGroup?.name).toBe("Status: running"); + expect(runningGroup?.nodes).toHaveLength(2); + + const stoppedGroup = groups.find((g) => g.id === "proxmox:status:stopped"); + expect(stoppedGroup).toBeDefined(); + expect(stoppedGroup?.nodes).toHaveLength(1); + + const pausedGroup = groups.find((g) => g.id === "proxmox:status:paused"); + expect(pausedGroup).toBeDefined(); + expect(pausedGroup?.nodes).toHaveLength(1); + + // Verify type groups 
+ const qemuGroup = groups.find((g) => g.id === "proxmox:type:qemu"); + expect(qemuGroup).toBeDefined(); + expect(qemuGroup?.name).toBe("Proxmox VMs"); + expect(qemuGroup?.nodes).toHaveLength(2); + + const lxcGroup = groups.find((g) => g.id === "proxmox:type:lxc"); + expect(lxcGroup).toBeDefined(); + expect(lxcGroup?.name).toBe("Proxmox Containers"); + expect(lxcGroup?.nodes).toHaveLength(2); + + // Verify all groups have correct source + groups.forEach((group) => { + expect(group.source).toBe("proxmox"); + expect(group.sources).toEqual(["proxmox"]); + expect(group.linked).toBe(false); + }); + }); + + it("should return cached groups on subsequent calls within TTL", async () => { + // Mock API response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "vm-1", + node: "pve1", + type: "qemu", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // First call - should hit API + const groups1 = await service.getGroups(); + expect(mockClient.get).toHaveBeenCalledTimes(1); + expect(groups1.length).toBeGreaterThan(0); + + // Second call - should use cache + const groups2 = await service.getGroups(); + expect(mockClient.get).toHaveBeenCalledTimes(1); // Still 1, not called again + expect(groups2).toEqual(groups1); + }); + + it("should throw error if client is not initialized", async () => { + // Don't initialize service + await expect(service.getGroups()).rejects.toThrow( + "ProxmoxClient not initialized" + ); + }); + + it("should handle empty inventory gracefully", async () => { + // Mock empty API response + mockClient.get.mockResolvedValue([]); + + // Initialize service + await service.initialize(); + + // Call getGroups + const groups = await service.getGroups(); + + // Should return empty array + expect(groups).toHaveLength(0); + expect(groups).toEqual([]); + }); + + it("should use correct group ID formats", async () => { + // Mock API response + const mockGuests: 
ProxmoxGuest[] = [ + { + vmid: 100, + name: "test-vm", + node: "testnode", + type: "qemu", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // Call getGroups + const groups = await service.getGroups(); + + // Verify ID formats + const nodeGroup = groups.find((g) => g.id.startsWith("proxmox:node:")); + expect(nodeGroup?.id).toBe("proxmox:node:testnode"); + + const statusGroup = groups.find((g) => g.id.startsWith("proxmox:status:")); + expect(statusGroup?.id).toBe("proxmox:status:running"); + + const typeGroup = groups.find((g) => g.id.startsWith("proxmox:type:")); + expect(typeGroup?.id).toBe("proxmox:type:qemu"); + }); + }); + + describe("executeAction", () => { + it("should execute start action on a VM", async () => { + // Mock cluster resources response to determine guest type + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "test-vm", + node: "pve1", + type: "qemu", + status: "stopped", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + mockClient.post.mockResolvedValue("UPID:pve1:00001234:task123"); + mockClient.waitForTask.mockResolvedValue(undefined); + + // Initialize service + await service.initialize(); + + // Execute start action + const action = { + type: "task" as const, + target: "proxmox:pve1:100", + action: "start", + }; + + const result = await service.executeAction(action); + + // Verify API calls + expect(mockClient.get).toHaveBeenCalledWith( + "/api2/json/cluster/resources?type=vm" + ); + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu/100/status/start", + {} + ); + expect(mockClient.waitForTask).toHaveBeenCalledWith( + "pve1", + "UPID:pve1:00001234:task123" + ); + + // Verify result + expect(result.status).toBe("success"); + expect(result.targetNodes).toEqual(["proxmox:pve1:100"]); + expect(result.action).toBe("start"); + expect(result.results).toHaveLength(1); + 
expect(result.results[0].status).toBe("success"); + }); + + it("should execute stop action on an LXC container", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 200, + name: "test-container", + node: "pve2", + type: "lxc", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + mockClient.post.mockResolvedValue("UPID:pve2:00005678:task456"); + mockClient.waitForTask.mockResolvedValue(undefined); + + // Initialize service + await service.initialize(); + + // Execute stop action + const action = { + type: "task" as const, + target: "proxmox:pve2:200", + action: "stop", + }; + + const result = await service.executeAction(action); + + // Verify API calls + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve2/lxc/200/status/stop", + {} + ); + + // Verify result + expect(result.status).toBe("success"); + expect(result.targetNodes).toEqual(["proxmox:pve2:200"]); + expect(result.action).toBe("stop"); + }); + + it("should execute shutdown action", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "test-vm", + node: "pve1", + type: "qemu", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + mockClient.post.mockResolvedValue("UPID:pve1:00001234:task123"); + mockClient.waitForTask.mockResolvedValue(undefined); + + // Initialize service + await service.initialize(); + + // Execute shutdown action + const action = { + type: "task" as const, + target: "proxmox:pve1:100", + action: "shutdown", + }; + + const result = await service.executeAction(action); + + // Verify API call + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu/100/status/shutdown", + {} + ); + + // Verify result + expect(result.status).toBe("success"); + }); + + it("should execute reboot action", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 
100, + name: "test-vm", + node: "pve1", + type: "qemu", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + mockClient.post.mockResolvedValue("UPID:pve1:00001234:task123"); + mockClient.waitForTask.mockResolvedValue(undefined); + + // Initialize service + await service.initialize(); + + // Execute reboot action + const action = { + type: "task" as const, + target: "proxmox:pve1:100", + action: "reboot", + }; + + const result = await service.executeAction(action); + + // Verify API call + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu/100/status/reboot", + {} + ); + + // Verify result + expect(result.status).toBe("success"); + }); + + it("should execute suspend action on a VM", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "test-vm", + node: "pve1", + type: "qemu", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + mockClient.post.mockResolvedValue("UPID:pve1:00001234:task123"); + mockClient.waitForTask.mockResolvedValue(undefined); + + // Initialize service + await service.initialize(); + + // Execute suspend action + const action = { + type: "task" as const, + target: "proxmox:pve1:100", + action: "suspend", + }; + + const result = await service.executeAction(action); + + // Verify API call + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu/100/status/suspend", + {} + ); + + // Verify result + expect(result.status).toBe("success"); + }); + + it("should execute resume action on a VM", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "test-vm", + node: "pve1", + type: "qemu", + status: "paused", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + mockClient.post.mockResolvedValue("UPID:pve1:00001234:task123"); + mockClient.waitForTask.mockResolvedValue(undefined); + + // Initialize service + await 
service.initialize(); + + // Execute resume action + const action = { + type: "task" as const, + target: "proxmox:pve1:100", + action: "resume", + }; + + const result = await service.executeAction(action); + + // Verify API call + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu/100/status/resume", + {} + ); + + // Verify result + expect(result.status).toBe("success"); + }); + + it("should reject suspend action on LXC container", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 200, + name: "test-container", + node: "pve2", + type: "lxc", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // Execute suspend action on LXC + const action = { + type: "task" as const, + target: "proxmox:pve2:200", + action: "suspend", + }; + + const result = await service.executeAction(action); + + // Verify result is failure + expect(result.status).toBe("failed"); + expect(result.error).toContain("not supported for LXC containers"); + expect(result.results[0].status).toBe("failed"); + }); + + it("should reject resume action on LXC container", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 200, + name: "test-container", + node: "pve2", + type: "lxc", + status: "paused", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // Execute resume action on LXC + const action = { + type: "task" as const, + target: "proxmox:pve2:200", + action: "resume", + }; + + const result = await service.executeAction(action); + + // Verify result is failure + expect(result.status).toBe("failed"); + expect(result.error).toContain("not supported for LXC containers"); + }); + + it("should handle action failure with error details", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { 
+ vmid: 100, + name: "test-vm", + node: "pve1", + type: "qemu", + status: "stopped", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + mockClient.post.mockRejectedValue(new Error("API error: VM is locked")); + + // Initialize service + await service.initialize(); + + // Execute start action + const action = { + type: "task" as const, + target: "proxmox:pve1:100", + action: "start", + }; + + const result = await service.executeAction(action); + + // Verify result contains error + expect(result.status).toBe("failed"); + expect(result.error).toContain("API error: VM is locked"); + expect(result.results[0].status).toBe("failed"); + expect(result.results[0].error).toContain("API error: VM is locked"); + }); + + it("should reject unsupported action", async () => { + // Mock cluster resources response + const mockGuests: ProxmoxGuest[] = [ + { + vmid: 100, + name: "test-vm", + node: "pve1", + type: "qemu", + status: "running", + }, + ]; + + mockClient.get.mockResolvedValue(mockGuests); + + // Initialize service + await service.initialize(); + + // Execute unsupported action + const action = { + type: "task" as const, + target: "proxmox:pve1:100", + action: "invalid-action", + }; + + const result = await service.executeAction(action); + + // Verify result is failure + expect(result.status).toBe("failed"); + expect(result.error).toContain("Unsupported action"); + }); + + it("should throw error if client is not initialized", async () => { + // Don't initialize service + const action = { + type: "task" as const, + target: "proxmox:pve1:100", + action: "start", + }; + + await expect(service.executeAction(action)).rejects.toThrow( + "ProxmoxClient not initialized" + ); + }); + + it("should handle invalid nodeId format", async () => { + // Initialize service + await service.initialize(); + + // Execute action with invalid nodeId + const action = { + type: "task" as const, + target: "invalid-node-id", + action: "start", + }; + + const result = await 
service.executeAction(action); + + // Verify result is failure + expect(result.status).toBe("failed"); + expect(result.error).toContain("Invalid nodeId format"); + }); +}); + +describe("listCapabilities", () => { + it("should return all lifecycle action capabilities", () => { + const capabilities = service.listCapabilities(); + + // Verify we have all 7 capabilities + expect(capabilities).toHaveLength(7); + + // Verify each capability has required fields + capabilities.forEach((cap) => { + expect(cap).toHaveProperty("name"); + expect(cap).toHaveProperty("description"); + expect(cap).toHaveProperty("parameters"); + expect(Array.isArray(cap.parameters)).toBe(true); + }); + + // Verify specific capabilities + const capabilityNames = capabilities.map((c) => c.name); + expect(capabilityNames).toContain("start"); + expect(capabilityNames).toContain("stop"); + expect(capabilityNames).toContain("shutdown"); + expect(capabilityNames).toContain("reboot"); + expect(capabilityNames).toContain("suspend"); + expect(capabilityNames).toContain("resume"); + expect(capabilityNames).toContain("snapshot"); + + // Verify start capability details + const startCap = capabilities.find((c) => c.name === "start"); + expect(startCap?.description).toBe("Start a VM or container"); + expect(startCap?.parameters).toEqual([]); + + // Verify suspend capability mentions VM-only + const suspendCap = capabilities.find((c) => c.name === "suspend"); + expect(suspendCap?.description).toContain("VM"); + expect(suspendCap?.description).toContain("not supported for LXC"); + }); + + it("should return capabilities without requiring initialization", () => { + // Don't initialize service + const capabilities = service.listCapabilities(); + + // Should still return capabilities + expect(capabilities).toHaveLength(7); + }); +}); + +describe("Provisioning Capabilities", () => { + describe("createVM", () => { + it("should create a VM successfully", async () => { + // Mock guest existence check (should not exist) + 
mockClient.get.mockRejectedValueOnce(new Error("Guest not found")); + + // Mock VM creation + mockClient.post.mockResolvedValueOnce("UPID:pve1:00001234:task"); + mockClient.waitForTask.mockResolvedValueOnce(undefined); + + // Initialize service + await service.initialize(); + + // Create VM + const params = { + vmid: 100, + name: "test-vm", + node: "pve1", + cores: 2, + memory: 2048, + }; + + const result = await service.createVM(params); + + // Verify result + expect(result.status).toBe("success"); + expect(result.action).toBe("create_vm"); + const { node: _node, ...expectedPayload } = params; + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu", + expectedPayload + ); + expect(mockClient.waitForTask).toHaveBeenCalledWith("pve1", "UPID:pve1:00001234:task"); + }); + + it("should reject creation if VMID already exists", async () => { + // Mock guest existence check (exists) - getGuestType queries cluster resources + mockClient.get.mockResolvedValueOnce([ + { vmid: 100, node: "pve1", type: "qemu" } + ]); + + // Initialize service + await service.initialize(); + + // Try to create VM with existing VMID + const params = { + vmid: 100, + name: "test-vm", + node: "pve1", + }; + + const result = await service.createVM(params); + + // Verify result is failure + expect(result.status).toBe("failed"); + expect(result.error).toContain("already exists"); + expect(mockClient.post).not.toHaveBeenCalled(); + }); + }); + + describe("createLXC", () => { + it("should create an LXC container successfully", async () => { + // Mock guest existence check (should not exist) + mockClient.get.mockRejectedValueOnce(new Error("Guest not found")); + + // Mock LXC creation + mockClient.post.mockResolvedValueOnce("UPID:pve1:00001235:task"); + mockClient.waitForTask.mockResolvedValueOnce(undefined); + + // Initialize service + await service.initialize(); + + // Create LXC + const params = { + vmid: 101, + hostname: "test-container", + node: "pve1", + ostemplate: 
"local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst", + cores: 1, + memory: 512, + }; + + const result = await service.createLXC(params); + + // Verify result + expect(result.status).toBe("success"); + expect(result.action).toBe("create_lxc"); + const { node: _node, ...expectedPayload } = params; + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/lxc", + expectedPayload + ); + expect(mockClient.waitForTask).toHaveBeenCalledWith("pve1", "UPID:pve1:00001235:task"); + }); + + it("should reject creation if VMID already exists", async () => { + // Mock guest existence check (exists) - getGuestType queries cluster resources + mockClient.get.mockResolvedValueOnce([ + { vmid: 101, node: "pve1", type: "lxc" } + ]); + + // Initialize service + await service.initialize(); + + // Try to create LXC with existing VMID + const params = { + vmid: 101, + hostname: "test-container", + node: "pve1", + ostemplate: "local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst", + }; + + const result = await service.createLXC(params); + + // Verify result is failure + expect(result.status).toBe("failed"); + expect(result.error).toContain("already exists"); + expect(mockClient.post).not.toHaveBeenCalled(); + }); + }); + + describe("destroyGuest", () => { + it("should destroy a running guest after stopping it", async () => { + // Mock guest existence check - getGuestType queries cluster resources (called twice) + mockClient.get + .mockResolvedValueOnce([{ vmid: 100, node: "pve1", type: "qemu" }]) // guestExists -> getGuestType + .mockResolvedValueOnce([{ vmid: 100, node: "pve1", type: "qemu" }]) // getGuestType again + .mockResolvedValueOnce({ status: "running" }); // status check + + // Mock stop and delete operations + mockClient.post.mockResolvedValueOnce("UPID:pve1:00001236:stop"); + mockClient.waitForTask.mockResolvedValueOnce(undefined); + mockClient.delete.mockResolvedValueOnce("UPID:pve1:00001237:delete"); + 
mockClient.waitForTask.mockResolvedValueOnce(undefined); + + // Initialize service + await service.initialize(); + + // Destroy guest + const result = await service.destroyGuest("pve1", 100); + + // Verify result + expect(result.status).toBe("success"); + expect(result.action).toBe("destroy_guest"); + expect(mockClient.post).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu/100/status/stop", + {} + ); + expect(mockClient.delete).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu/100" + ); + }); + + it("should destroy a stopped guest without stopping it first", async () => { + // Mock guest existence check - getGuestType queries cluster resources (called twice) + mockClient.get + .mockResolvedValueOnce([{ vmid: 100, node: "pve1", type: "qemu" }]) // guestExists -> getGuestType + .mockResolvedValueOnce([{ vmid: 100, node: "pve1", type: "qemu" }]) // getGuestType again + .mockResolvedValueOnce({ status: "stopped" }); // status check + + // Mock delete operation + mockClient.delete.mockResolvedValueOnce("UPID:pve1:00001238:delete"); + mockClient.waitForTask.mockResolvedValueOnce(undefined); + + // Initialize service + await service.initialize(); + + // Destroy guest + const result = await service.destroyGuest("pve1", 100); + + // Verify result + expect(result.status).toBe("success"); + expect(mockClient.post).not.toHaveBeenCalled(); // Should not stop + expect(mockClient.delete).toHaveBeenCalledWith( + "/api2/json/nodes/pve1/qemu/100" + ); + }); + + it("should return error if guest does not exist", async () => { + // Mock guest existence check (does not exist) + mockClient.get.mockRejectedValueOnce(new Error("Guest not found")); + + // Initialize service + await service.initialize(); + + // Try to destroy non-existent guest + const result = await service.destroyGuest("pve1", 999); + + // Verify result is failure + expect(result.status).toBe("failed"); + expect(result.error).toContain("not found"); + expect(mockClient.delete).not.toHaveBeenCalled(); + }); + }); + + 
describe("listProvisioningCapabilities", () => { + it("should return all provisioning capabilities", () => { + const capabilities = service.listProvisioningCapabilities(); + + // Verify we have all 4 provisioning capabilities + expect(capabilities).toHaveLength(4); + + // Verify each capability has required fields + capabilities.forEach((cap) => { + expect(cap).toHaveProperty("name"); + expect(cap).toHaveProperty("description"); + expect(cap).toHaveProperty("operation"); + expect(cap).toHaveProperty("parameters"); + expect(Array.isArray(cap.parameters)).toBe(true); + }); + + // Verify specific capabilities + const capabilityNames = capabilities.map((c) => c.name); + expect(capabilityNames).toContain("create_vm"); + expect(capabilityNames).toContain("create_lxc"); + expect(capabilityNames).toContain("destroy_vm"); + expect(capabilityNames).toContain("destroy_lxc"); + + // Verify create_vm capability details + const createVmCap = capabilities.find((c) => c.name === "create_vm"); + expect(createVmCap?.operation).toBe("create"); + expect(createVmCap?.description).toContain("virtual machine"); + expect(createVmCap?.parameters.length).toBeGreaterThan(0); + + // Verify destroy_vm capability details + const destroyVmCap = capabilities.find((c) => c.name === "destroy_vm"); + expect(destroyVmCap?.operation).toBe("destroy"); + expect(destroyVmCap?.description).toContain("virtual machine"); + }); + + it("should return capabilities without requiring initialization", () => { + // Don't initialize service + const capabilities = service.listProvisioningCapabilities(); + + // Should still return capabilities + expect(capabilities).toHaveLength(4); + }); + }); + + describe("executeAction with provisioning actions", () => { + it("should route create_vm action to createVM method", async () => { + // Mock guest existence check (should not exist) + mockClient.get.mockRejectedValueOnce(new Error("Guest not found")); + + // Mock VM creation + 
mockClient.post.mockResolvedValueOnce("UPID:pve1:00001239:task"); + mockClient.waitForTask.mockResolvedValueOnce(undefined); + + // Initialize service + await service.initialize(); + + // Execute create_vm action + const action = { + type: "task" as const, + target: "", + action: "create_vm", + parameters: { + vmid: 100, + name: "test-vm", + node: "pve1", + }, + }; + + const result = await service.executeAction(action); + + // Verify result + expect(result.status).toBe("success"); + expect(result.action).toBe("create_vm"); + }); + + it("should route destroy_vm action to destroyGuest method", async () => { + // Mock guest existence check - getGuestType queries cluster resources (called twice) + mockClient.get + .mockResolvedValueOnce([{ vmid: 100, node: "pve1", type: "qemu" }]) // guestExists -> getGuestType + .mockResolvedValueOnce([{ vmid: 100, node: "pve1", type: "qemu" }]) // getGuestType again + .mockResolvedValueOnce({ status: "stopped" }); // status check + + // Mock delete operation + mockClient.delete.mockResolvedValueOnce("UPID:pve1:00001240:delete"); + mockClient.waitForTask.mockResolvedValueOnce(undefined); + + // Initialize service + await service.initialize(); + + // Execute destroy_vm action + const action = { + type: "task" as const, + target: "", + action: "destroy_vm", + parameters: { + node: "pve1", + vmid: 100, + }, + }; + + const result = await service.executeAction(action); + + // Verify result + expect(result.status).toBe("success"); + expect(result.action).toBe("destroy_guest"); + }); + }); +}); + + describe("computeType field", () => { + it("should include computeType 'vm' for qemu guests", async () => { + const mockGuests: ProxmoxGuest[] = [ + { vmid: 100, name: "test-vm", node: "pve1", type: "qemu", status: "running" }, + ]; + mockClient.get.mockResolvedValue(mockGuests); + await service.initialize(); + + const nodes = await service.getInventory(); + + expect(nodes).toHaveLength(1); + expect((nodes[0] as any).computeType).toBe("vm"); + }); 
+ + it("should include computeType 'lxc' for lxc guests", async () => { + const mockGuests: ProxmoxGuest[] = [ + { vmid: 200, name: "test-ct", node: "pve1", type: "lxc", status: "running" }, + ]; + mockClient.get.mockResolvedValue(mockGuests); + await service.initialize(); + + const nodes = await service.getInventory(); + + expect(nodes).toHaveLength(1); + expect((nodes[0] as any).computeType).toBe("lxc"); + }); + }); + + describe("getInventory with computeType filter", () => { + const mixedGuests: ProxmoxGuest[] = [ + { vmid: 100, name: "vm-1", node: "pve1", type: "qemu", status: "running" }, + { vmid: 101, name: "vm-2", node: "pve1", type: "qemu", status: "stopped" }, + { vmid: 200, name: "ct-1", node: "pve1", type: "lxc", status: "running" }, + ]; + + it("should return only VMs when computeType is 'qemu'", async () => { + mockClient.get.mockResolvedValue(mixedGuests); + await service.initialize(); + + const nodes = await service.getInventory("qemu"); + + expect(nodes).toHaveLength(2); + nodes.forEach((n) => expect((n as any).computeType).toBe("vm")); + }); + + it("should return only containers when computeType is 'lxc'", async () => { + mockClient.get.mockResolvedValue(mixedGuests); + await service.initialize(); + + const nodes = await service.getInventory("lxc"); + + expect(nodes).toHaveLength(1); + expect((nodes[0] as any).computeType).toBe("lxc"); + }); + + it("should return all guests when computeType is undefined", async () => { + mockClient.get.mockResolvedValue(mixedGuests); + await service.initialize(); + + const nodes = await service.getInventory(); + + expect(nodes).toHaveLength(3); + }); + + it("should filter cached results by computeType", async () => { + mockClient.get.mockResolvedValue(mixedGuests); + await service.initialize(); + + // First call populates cache + await service.getInventory(); + expect(mockClient.get).toHaveBeenCalledTimes(1); + + // Second call with filter should use cache but filter + const vms = await 
service.getInventory("qemu"); + expect(mockClient.get).toHaveBeenCalledTimes(1); // Still cached + expect(vms).toHaveLength(2); + vms.forEach((n) => expect((n as any).computeType).toBe("vm")); + }); + }); + + describe("getGroups type group names", () => { + it("should use 'Proxmox VMs' and 'Proxmox Containers' as group names", async () => { + const mockGuests: ProxmoxGuest[] = [ + { vmid: 100, name: "vm-1", node: "pve1", type: "qemu", status: "running" }, + { vmid: 200, name: "ct-1", node: "pve1", type: "lxc", status: "running" }, + ]; + mockClient.get.mockResolvedValue(mockGuests); + await service.initialize(); + + const groups = await service.getGroups(); + + const vmGroup = groups.find((g) => g.id === "proxmox:type:qemu"); + expect(vmGroup?.name).toBe("Proxmox VMs"); + + const lxcGroup = groups.find((g) => g.id === "proxmox:type:lxc"); + expect(lxcGroup?.name).toBe("Proxmox Containers"); + }); + }); +}); diff --git a/backend/src/integrations/proxmox/types.ts b/backend/src/integrations/proxmox/types.ts new file mode 100644 index 00000000..82f059e6 --- /dev/null +++ b/backend/src/integrations/proxmox/types.ts @@ -0,0 +1,178 @@ +/** + * Proxmox Virtual Environment Integration Types + * + * Type definitions for the Proxmox VE integration plugin. 
+ */ + +import type { ProvisioningCapability } from "../types"; + +export type { ProvisioningCapability }; + +/** + * Proxmox configuration + */ +export interface ProxmoxConfig { + host: string; + port?: number; + username?: string; + password?: string; + realm?: string; + token?: string; + ssl?: ProxmoxSSLConfig; + timeout?: number; +} + +/** + * SSL configuration for Proxmox client + */ +export interface ProxmoxSSLConfig { + rejectUnauthorized?: boolean; + ca?: string; + cert?: string; + key?: string; +} + +/** + * Proxmox guest (VM or LXC) from API + */ +export interface ProxmoxGuest { + vmid: number; + name: string; + node: string; + type: "qemu" | "lxc"; + status: "running" | "stopped" | "paused"; + template?: number; // 1 if this is a template, 0 or undefined otherwise + maxmem?: number; + maxdisk?: number; + cpus?: number; + uptime?: number; + netin?: number; + netout?: number; + diskread?: number; + diskwrite?: number; +} + +/** + * Proxmox guest configuration + */ +export interface ProxmoxGuestConfig { + vmid: number; + name: string; + hostname?: string; + cores: number; + memory: number; + sockets?: number; + cpu?: string; + bootdisk?: string; + scsihw?: string; + ostype?: string; + net0?: string; + net1?: string; + ide2?: string; + [key: string]: unknown; +} + +/** + * Proxmox guest status + */ +export interface ProxmoxGuestStatus { + status: "running" | "stopped" | "paused"; + vmid: number; + uptime?: number; + cpus?: number; + maxmem?: number; + mem?: number; + maxdisk?: number; + disk?: number; + netin?: number; + netout?: number; + diskread?: number; + diskwrite?: number; +} + +/** + * VM creation parameters + */ +export interface VMCreateParams { + vmid: number; + name: string; + node: string; + cores?: number; + memory?: number; + sockets?: number; + cpu?: string; + scsi0?: string; + ide2?: string; + net0?: string; + ostype?: string; + [key: string]: unknown; +} + +/** + * LXC creation parameters + */ +export interface LXCCreateParams { + vmid: 
number; + hostname: string; + node: string; + ostemplate: string; + cores?: number; + memory?: number; + rootfs?: string; + net0?: string; + password?: string; + [key: string]: unknown; +} + +/** + * Proxmox task status + */ +export interface ProxmoxTaskStatus { + status: "running" | "stopped"; + exitstatus?: string; + type: string; + node: string; + pid: number; + pstart: number; + starttime: number; + upid: string; +} + +/** + * Retry configuration + */ +export interface RetryConfig { + maxAttempts: number; + initialDelay: number; + maxDelay: number; + backoffMultiplier: number; + retryableErrors: string[]; +} + +/** + * Proxmox error classes + */ +export class ProxmoxError extends Error { + constructor( + message: string, + public code: string, + public details?: unknown + ) { + super(message); + this.name = "ProxmoxError"; + } +} + +export class ProxmoxAuthenticationError extends ProxmoxError { + constructor(message: string, details?: unknown) { + super(message, "PROXMOX_AUTH_ERROR", details); + this.name = "ProxmoxAuthenticationError"; + } +} + +export class ProxmoxConnectionError extends ProxmoxError { + constructor(message: string, details?: unknown) { + super(message, "PROXMOX_CONNECTION_ERROR", details); + this.name = "ProxmoxConnectionError"; + } +} diff --git a/backend/src/integrations/types.ts b/backend/src/integrations/types.ts index c1c7af3c..08c5f940 100644 --- a/backend/src/integrations/types.ts +++ b/backend/src/integrations/types.ts @@ -81,6 +81,13 @@ export interface Capability { parameters?: CapabilityParameter[]; } +/** + * Provisioning capability for infrastructure creation/destruction + */ +export interface ProvisioningCapability extends Capability { + operation: "create" | "destroy"; +} + /** * Parameter definition for a capability */ @@ -90,6 +97,12 @@ export interface CapabilityParameter { required: boolean; description?: string; default?: unknown; + validation?: { + min?: number; + max?: number; + pattern?: string; + enum?: string[]; + }; 
} /** diff --git a/backend/src/middleware/authMiddleware.ts b/backend/src/middleware/authMiddleware.ts index efc33e68..e2f83d7b 100644 --- a/backend/src/middleware/authMiddleware.ts +++ b/backend/src/middleware/authMiddleware.ts @@ -1,6 +1,6 @@ import type { Request, Response, NextFunction } from "express"; import { AuthenticationService } from "../services/AuthenticationService"; -import type { Database } from "sqlite3"; +import type { DatabaseAdapter } from "../database/DatabaseAdapter"; import { ERROR_CODES, sendAuthenticationError, sendDatabaseError, isDatabaseConnectionError } from "../utils/errorHandling"; // Extend Express Request to include user payload @@ -27,7 +27,7 @@ declare global { * * Requirements: 5.1, 6.6, 16.1, 16.5 */ -export function createAuthMiddleware(db: Database, jwtSecret?: string) { +export function createAuthMiddleware(db: DatabaseAdapter, jwtSecret?: string) { const authService = new AuthenticationService(db, jwtSecret); return async (req: Request, res: Response, next: NextFunction): Promise => { diff --git a/backend/src/middleware/rbacMiddleware.ts b/backend/src/middleware/rbacMiddleware.ts index 35c85057..a07ae23a 100644 --- a/backend/src/middleware/rbacMiddleware.ts +++ b/backend/src/middleware/rbacMiddleware.ts @@ -1,7 +1,7 @@ import type { Request, Response, NextFunction } from "express"; import { PermissionService } from "../services/PermissionService"; import { AuditLoggingService } from "../services/AuditLoggingService"; -import type { Database } from "sqlite3"; +import type { DatabaseAdapter } from "../database/DatabaseAdapter"; import { ERROR_CODES, sendAuthorizationError, sendDatabaseError, isDatabaseConnectionError } from "../utils/errorHandling"; import { LoggerService } from "../services/LoggerService"; @@ -20,7 +20,7 @@ import { LoggerService } from "../services/LoggerService"; * * Requirements: 5.2, 5.3, 5.4, 7.4, 16.2, 16.5 */ -export function createRbacMiddleware(db: Database) { +export function 
createRbacMiddleware(db: DatabaseAdapter) { const permissionService = new PermissionService(db); const auditLogger = new AuditLoggingService(db); const logger = new LoggerService(); diff --git a/backend/src/routes/config.ts b/backend/src/routes/config.ts index b3c0da1a..7aa3bb98 100644 --- a/backend/src/routes/config.ts +++ b/backend/src/routes/config.ts @@ -20,4 +20,19 @@ router.get( }), ); +/** + * GET /api/config/provisioning + * Get provisioning safety configuration + */ +router.get( + "/provisioning", + asyncHandler((_req, res) => { + res.json({ + provisioning: { + allowDestructiveActions: configService.isDestructiveProvisioningAllowed(), + }, + }); + }), +); + export default router; diff --git a/backend/src/routes/facts.ts b/backend/src/routes/facts.ts index acf74db9..700ea7b5 100644 --- a/backend/src/routes/facts.ts +++ b/backend/src/routes/facts.ts @@ -444,5 +444,124 @@ export function createFactsRouter( }), ); + /** + * GET /api/nodes/:id/facts + * Get facts for a node from all available information sources + */ + router.get( + "/:id/facts", + asyncHandler(async (req: Request, res: Response): Promise => { + const startTime = Date.now(); + + logger.info("Fetching facts from all sources", { + component: "FactsRouter", + operation: "getAllFacts", + metadata: { nodeId: req.params.id }, + }); + + try { + const params = NodeIdParamSchema.parse(req.params); + const nodeId = params.id; + + // Skip the expensive getAggregatedInventory() call — if no source + // knows about this node the response will simply have empty sources, + // which the frontend already handles gracefully. 
+ + // Gather facts from all information sources in parallel + const sources = integrationManager.getAllInformationSources(); + const factsResults: Record< + string, + { facts: Record; timestamp: string } + > = {}; + const errors: Record = {}; + + // Per-source timeout (5s) — keep the page snappy + const SOURCE_TIMEOUT_MS = 5_000; + + const promises = sources.map(async (source) => { + if (!source.isInitialized()) return; + try { + const timeoutPromise = new Promise((_, reject) => + setTimeout( + () => reject(new Error(`Timeout after ${SOURCE_TIMEOUT_MS}ms`)), + SOURCE_TIMEOUT_MS, + ), + ); + const nodeFacts = await Promise.race([ + source.getNodeFacts(nodeId), + timeoutPromise, + ]); + if (nodeFacts && typeof nodeFacts === "object") { + const raw = nodeFacts as unknown as Record; + const factsObj = + "facts" in raw && typeof raw.facts === "object" && raw.facts !== null + ? (raw.facts as Record) + : raw; + const timestamp = + typeof raw.gatheredAt === "string" + ? raw.gatheredAt + : typeof raw.timestamp === "string" + ? raw.timestamp + : new Date().toISOString(); + factsResults[source.name] = { facts: factsObj, timestamp }; + } + } catch (error) { + const errorMsg = + error instanceof Error ? error.message : String(error); + errors[source.name] = errorMsg; + logger.warn(`Failed to gather facts from '${source.name}'`, { + component: "FactsRouter", + operation: "getAllFacts", + metadata: { nodeId, source: source.name, error: errorMsg }, + }); + } + }); + + await Promise.all(promises); + + const duration = Date.now() - startTime; + + logger.info("Facts fetched from all sources", { + component: "FactsRouter", + operation: "getAllFacts", + metadata: { + nodeId, + sources: Object.keys(factsResults), + errorSources: Object.keys(errors), + duration, + }, + }); + + res.json({ + sources: factsResults, + errors: Object.keys(errors).length > 0 ? 
errors : undefined, + }); + } catch (error) { + if (error instanceof z.ZodError) { + res.status(400).json({ + error: { + code: "INVALID_REQUEST", + message: "Invalid node ID parameter", + details: error.errors, + }, + }); + return; + } + + logger.error("Error fetching facts", { + component: "FactsRouter", + operation: "getAllFacts", + }, error instanceof Error ? error : undefined); + + res.status(500).json({ + error: { + code: "INTERNAL_SERVER_ERROR", + message: "Failed to fetch facts", + }, + }); + } + }), + ); + return router; } diff --git a/backend/src/routes/integrationConfig.ts b/backend/src/routes/integrationConfig.ts new file mode 100644 index 00000000..cd0f4af7 --- /dev/null +++ b/backend/src/routes/integrationConfig.ts @@ -0,0 +1,277 @@ +import { Router, type Request, type Response } from "express"; +import { z } from "zod"; +import { ZodError } from "zod"; +import { asyncHandler } from "./asyncHandler"; +import { IntegrationConfigService } from "../services/IntegrationConfigService"; +import type { DatabaseService } from "../database/DatabaseService"; +import { LoggerService } from "../services/LoggerService"; +import { sendValidationError, ERROR_CODES } from "../utils/errorHandling"; +import { createAuthMiddleware } from "../middleware/authMiddleware"; +import { createRbacMiddleware } from "../middleware/rbacMiddleware"; + +const logger = new LoggerService(); + +/** + * Zod schema for the integration name route parameter + */ +const IntegrationNameSchema = z.object({ + name: z.string().min(1, "Integration name is required"), +}); + +/** + * Zod schema for saving an integration config (PUT body) + */ +const SaveConfigSchema = z.object({ + config: z.record(z.unknown()), +}); + +/** + * Create integration config routes + * + * Requirements: 18.1, 19.1, 21.2, 27.4 + */ +export function createIntegrationConfigRouter(databaseService: DatabaseService): Router { + const router = Router(); + const configService = new IntegrationConfigService( + 
databaseService.getConnection(), + process.env.JWT_SECRET ?? "", + ); + const authMiddleware = createAuthMiddleware(databaseService.getConnection()); + const rbacMiddleware = createRbacMiddleware(databaseService.getConnection()); + + /** + * GET /api/config/integrations + * List all integration configs for the authenticated user + * + * Requirements: 18.1 + */ + router.get( + "/", + asyncHandler(authMiddleware), + asyncHandler(rbacMiddleware("integration_config", "read")), + asyncHandler(async (req: Request, res: Response): Promise => { + const userId = req.user?.userId; + + logger.info("Processing list integration configs request", { + component: "IntegrationConfigRouter", + operation: "listConfigs", + metadata: { userId }, + }); + + if (!userId) { + res.status(401).json({ + error: { + code: ERROR_CODES.UNAUTHORIZED, + message: "Authentication required", + }, + }); + return; + } + + try { + const configs = await configService.listConfigs(userId); + res.status(200).json({ configs }); + } catch (error) { + logger.error("List integration configs failed", { + component: "IntegrationConfigRouter", + operation: "listConfigs", + metadata: { userId }, + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to list integration configs", + }, + }); + } + }) + ); + + /** + * GET /api/config/integrations/:name + * Get effective (merged) config for an integration + * + * Requirements: 19.1 + */ + router.get( + "/:name", + asyncHandler(authMiddleware), + asyncHandler(rbacMiddleware("integration_config", "read")), + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing get effective config request", { + component: "IntegrationConfigRouter", + operation: "getEffectiveConfig", + metadata: { userId: req.user?.userId, name: req.params.name }, + }); + + try { + const { name } = IntegrationNameSchema.parse(req.params); + const config = await configService.getEffectiveConfig(name); + res.status(200).json({ config }); + } catch (error) { + if (error instanceof ZodError) { + logger.warn("Get effective config validation failed", { + component: "IntegrationConfigRouter", + operation: "getEffectiveConfig", + metadata: { errors: error.errors }, + }); + sendValidationError(res, error); + return; + } + + logger.error("Get effective config failed", { + component: "IntegrationConfigRouter", + operation: "getEffectiveConfig", + metadata: { userId: req.user?.userId, name: req.params.name }, + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to get effective config", + }, + }); + } + }) + ); + + /** + * PUT /api/config/integrations/:name + * Save (upsert) an integration config for the authenticated user + * + * Requirements: 18.1, 21.2 + */ + router.put( + "/:name", + asyncHandler(authMiddleware), + asyncHandler(rbacMiddleware("integration_config", "configure")), + asyncHandler(async (req: Request, res: Response): Promise => { + const userId = req.user?.userId; + + logger.info("Processing save integration config request", { + component: "IntegrationConfigRouter", + operation: "saveConfig", + metadata: { userId, name: req.params.name }, + }); + + if (!userId) { + res.status(401).json({ + error: { + code: ERROR_CODES.UNAUTHORIZED, + message: "Authentication required", + }, + }); + return; + } + + try { + const { name } = IntegrationNameSchema.parse(req.params); + const { config } = SaveConfigSchema.parse(req.body); + + await configService.saveConfig(userId, name, config); + + logger.info("Integration config saved successfully", { + component: "IntegrationConfigRouter", + operation: "saveConfig", + metadata: { userId, name }, + }); + + res.status(200).json({ message: "Config saved successfully" }); + } catch (error) { + if (error instanceof ZodError) { + logger.warn("Save config validation failed", { + component: "IntegrationConfigRouter", + operation: "saveConfig", + metadata: { errors: error.errors }, + }); + sendValidationError(res, error); + return; + } + + logger.error("Save integration config failed", { + component: "IntegrationConfigRouter", + operation: "saveConfig", + metadata: { userId, name: req.params.name }, + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to save integration config", + }, + }); + } + }) + ); + + /** + * DELETE /api/config/integrations/:name + * Delete an integration config for the authenticated user + * + * Requirements: 18.1 + */ + router.delete( + "/:name", + asyncHandler(authMiddleware), + asyncHandler(rbacMiddleware("integration_config", "configure")), + asyncHandler(async (req: Request, res: Response): Promise => { + const userId = req.user?.userId; + + logger.info("Processing delete integration config request", { + component: "IntegrationConfigRouter", + operation: "deleteConfig", + metadata: { userId, name: req.params.name }, + }); + + if (!userId) { + res.status(401).json({ + error: { + code: ERROR_CODES.UNAUTHORIZED, + message: "Authentication required", + }, + }); + return; + } + + try { + const { name } = IntegrationNameSchema.parse(req.params); + + await configService.deleteConfig(userId, name); + + logger.info("Integration config deleted successfully", { + component: "IntegrationConfigRouter", + operation: "deleteConfig", + metadata: { userId, name }, + }); + + res.status(200).json({ message: "Config deleted successfully" }); + } catch (error) { + if (error instanceof ZodError) { + logger.warn("Delete config validation failed", { + component: "IntegrationConfigRouter", + operation: "deleteConfig", + metadata: { errors: error.errors }, + }); + sendValidationError(res, error); + return; + } + + logger.error("Delete integration config failed", { + component: "IntegrationConfigRouter", + operation: "deleteConfig", + metadata: { userId, name: req.params.name }, + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to delete integration config", + }, + }); + } + }) + ); + + return router; +} diff --git a/backend/src/routes/integrations.ts b/backend/src/routes/integrations.ts index 07065939..956aaaac 100644 --- a/backend/src/routes/integrations.ts +++ b/backend/src/routes/integrations.ts @@ -1,5 +1,5 @@ import { Router } from "express"; -import type { Database } from "sqlite3"; +import type { DatabaseAdapter } from "../database/DatabaseAdapter"; import type { IntegrationManager } from "../integrations/IntegrationManager"; import type { PuppetDBService } from "../integrations/puppetdb/PuppetDBService"; import type { PuppetserverService } from "../integrations/puppetserver/PuppetserverService"; @@ -7,6 +7,8 @@ import { createColorsRouter } from "./integrations/colors"; import { createStatusRouter } from "./integrations/status"; import { createPuppetDBRouter } from "./integrations/puppetdb"; import { createPuppetserverRouter } from "./integrations/puppetserver"; +import { createProxmoxRouter } from "./integrations/proxmox"; +import { createProvisioningRouter } from "./integrations/provisioning"; import { createAuthMiddleware } from "../middleware/authMiddleware"; import { createRbacMiddleware } from "../middleware/rbacMiddleware"; import { asyncHandler } from "./asyncHandler"; @@ -18,8 +20,9 @@ export function createIntegrationsRouter( integrationManager: IntegrationManager, puppetDBService?: PuppetDBService, puppetserverService?: PuppetserverService, - db?: Database, + db?: DatabaseAdapter, jwtSecret?: string, + options?: { allowDestructiveProvisioning?: boolean }, ): Router { const router = Router(); @@ -53,5 +56,27 @@ export function createIntegrationsRouter( // Mount Puppetserver router (handles not configured case internally) router.use("/puppetserver", createPuppetserverRouter(puppetserverService, puppetDBService)); + // Mount Proxmox router + 
router.use("/proxmox", createProxmoxRouter(integrationManager, { + allowDestructiveActions: options?.allowDestructiveProvisioning ?? true, + })); + + // Mount Provisioning router (integration discovery) with authentication + // Validates Requirements: 1.3, 2.1, 9.1, 9.2 + if (db) { + const authMiddleware = createAuthMiddleware(db, jwtSecret); + const rbacMiddleware = createRbacMiddleware(db); + + router.use( + "/provisioning", + asyncHandler(authMiddleware), + asyncHandler(rbacMiddleware('provisioning', 'read')), + createProvisioningRouter(integrationManager) + ); + } else { + // Fallback for cases where database is not available (e.g., tests) + router.use("/provisioning", createProvisioningRouter(integrationManager)); + } + return router; } diff --git a/backend/src/routes/integrations/aws.ts b/backend/src/routes/integrations/aws.ts new file mode 100644 index 00000000..78f5ad7b --- /dev/null +++ b/backend/src/routes/integrations/aws.ts @@ -0,0 +1,541 @@ +import { Router, type Request, type Response } from "express"; +import { z } from "zod"; +import { ZodError } from "zod"; +import { asyncHandler } from "../asyncHandler"; +import type { AWSPlugin } from "../../integrations/aws/AWSPlugin"; +import type { IntegrationManager } from "../../integrations/IntegrationManager"; +import { AWSAuthenticationError } from "../../integrations/aws/types"; +import { LoggerService } from "../../services/LoggerService"; +import { sendValidationError, ERROR_CODES } from "../../utils/errorHandling"; + + +const logger = new LoggerService(); + +/** + * Zod schema for region query parameter + */ +const RegionQuerySchema = z.object({ + region: z.string().min(1, "Region is required"), +}); + +/** + * Zod schema for optional region query parameter + */ +const OptionalRegionQuerySchema = z.object({ + region: z.string().min(1).optional(), +}); + +/** + * Zod schema for subnets/security-groups query (region required, vpcId optional) + */ +const RegionVpcQuerySchema = z.object({ + region: 
z.string().min(1, "Region is required"), + vpcId: z.string().min(1).optional(), +}); + +/** + * Zod schema for provisioning request body + */ +const ProvisionSchema = z.object({ + imageId: z.string().min(1, "AMI image ID is required"), + instanceType: z.string().optional(), + keyName: z.string().optional(), + securityGroupIds: z.array(z.string()).optional(), + subnetId: z.string().optional(), + region: z.string().optional(), + name: z.string().optional(), +}); + +/** + * Zod schema for lifecycle action request body + */ +const LifecycleSchema = z.object({ + instanceId: z.string().min(1, "Instance ID is required"), + action: z.enum(["start", "stop", "reboot", "terminate"]), + region: z.string().optional(), +}); + +/** + * Create AWS integration API routes + * + * Requirements: 8.1, 9.1, 10.1, 11.1, 13.1-13.7, 27.2 + */ +export function createAWSRouter(awsPlugin: AWSPlugin, integrationManager?: IntegrationManager, options?: { allowDestructiveActions?: boolean }): Router { + const router = Router(); + + /** + * GET /api/integrations/aws/inventory + * List EC2 instances + * + * Permission: aws:read + */ + router.get( + "/inventory", + asyncHandler(async (_req: Request, res: Response): Promise => { + logger.info("Processing AWS inventory request", { + component: "AWSRouter", + operation: "getInventory", + }); + + try { + const inventory = await awsPlugin.getInventory(); + res.status(200).json({ inventory }); + } catch (error) { + if (error instanceof AWSAuthenticationError) { + logger.warn("AWS authentication failed during inventory", { + component: "AWSRouter", + operation: "getInventory", + }); + res.status(401).json({ + error: { + code: ERROR_CODES.UNAUTHORIZED, + message: "AWS authentication failed", + }, + }); + return; + } + + logger.error("AWS inventory request failed", { + component: "AWSRouter", + operation: "getInventory", + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve AWS inventory", + }, + }); + } + }) + ); + + /** + * POST /api/integrations/aws/provision + * Provision a new EC2 instance + * + * Permission: aws:provision + */ + router.post( + "/provision", + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing AWS provision request", { + component: "AWSRouter", + operation: "provision", + metadata: { userId: req.user?.userId }, + }); + + try { + const validatedBody = ProvisionSchema.parse(req.body); + + const result = await awsPlugin.executeAction({ + type: "task", + target: "new", + action: "provision", + parameters: validatedBody, + }); + + logger.info("AWS provision completed", { + component: "AWSRouter", + operation: "provision", + metadata: { status: result.status }, + }); + + // Invalidate inventory cache so the new instance appears immediately + if (result.status === "success") { + integrationManager?.clearInventoryCache(); + } + + res.status(result.status === "success" ? 201 : 200).json({ result }); + } catch (error) { + if (error instanceof ZodError) { + sendValidationError(res, error); + return; + } + + if (error instanceof AWSAuthenticationError) { + res.status(401).json({ + error: { + code: ERROR_CODES.UNAUTHORIZED, + message: "AWS authentication failed", + }, + }); + return; + } + + logger.error("AWS provision request failed", { + component: "AWSRouter", + operation: "provision", + metadata: { userId: req.user?.userId }, + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to provision AWS instance", + }, + }); + } + }) + ); + + /** + * POST /api/integrations/aws/lifecycle + * Execute lifecycle action (start/stop/reboot/terminate) + * + * Permission: aws:lifecycle + */ + router.post( + "/lifecycle", + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing AWS lifecycle request", { + component: "AWSRouter", + operation: "lifecycle", + metadata: { userId: req.user?.userId }, + }); + + try { + const validatedBody = LifecycleSchema.parse(req.body); + + // Guard: reject terminate if destructive provisioning actions are disabled + if (validatedBody.action === "terminate" && options?.allowDestructiveActions === false) { + res.status(403).json({ + error: { + code: "DESTRUCTIVE_ACTION_DISABLED", + message: "Destructive provisioning actions are disabled by configuration (ALLOW_DESTRUCTIVE_PROVISIONING=false)", + }, + }); + return; + } + + const target = validatedBody.region + ? `aws:${validatedBody.region}:${validatedBody.instanceId}` + : validatedBody.instanceId; + + const result = await awsPlugin.executeAction({ + type: "command", + target, + action: validatedBody.action, + metadata: validatedBody.region ? 
{ region: validatedBody.region } : undefined, + }); + + logger.info("AWS lifecycle action completed", { + component: "AWSRouter", + operation: "lifecycle", + metadata: { action: validatedBody.action, status: result.status }, + }); + + // Invalidate inventory cache so state changes appear immediately + if (result.status === "success") { + integrationManager?.clearInventoryCache(); + } + + res.status(200).json({ result }); + } catch (error) { + if (error instanceof ZodError) { + sendValidationError(res, error); + return; + } + + if (error instanceof AWSAuthenticationError) { + res.status(401).json({ + error: { + code: ERROR_CODES.UNAUTHORIZED, + message: "AWS authentication failed", + }, + }); + return; + } + + logger.error("AWS lifecycle request failed", { + component: "AWSRouter", + operation: "lifecycle", + metadata: { userId: req.user?.userId }, + }, error instanceof Error ? error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to execute AWS lifecycle action", + }, + }); + } + }) + ); + + /** + * GET /api/integrations/aws/regions + * List available AWS regions + * + * Permission: aws:read + */ + router.get( + "/regions", + asyncHandler(async (_req: Request, res: Response): Promise => { + logger.info("Processing AWS regions request", { + component: "AWSRouter", + operation: "getRegions", + }); + + try { + const regions = await awsPlugin.getRegions(); + res.status(200).json({ regions }); + } catch (error) { + logger.error("AWS regions request failed", { + component: "AWSRouter", + operation: "getRegions", + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve AWS regions", + }, + }); + } + }) + ); + + /** + * GET /api/integrations/aws/instance-types + * List available EC2 instance types + * + * Permission: aws:read + */ + router.get( + "/instance-types", + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing AWS instance types request", { + component: "AWSRouter", + operation: "getInstanceTypes", + }); + + try { + const { region } = OptionalRegionQuerySchema.parse(req.query); + const instanceTypes = await awsPlugin.getInstanceTypes(region); + res.status(200).json({ instanceTypes }); + } catch (error) { + if (error instanceof ZodError) { + sendValidationError(res, error); + return; + } + + logger.error("AWS instance types request failed", { + component: "AWSRouter", + operation: "getInstanceTypes", + }, error instanceof Error ? error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve AWS instance types", + }, + }); + } + }) + ); + + /** + * GET /api/integrations/aws/amis + * List available AMIs by region, with optional name search + * + * Permission: aws:read + */ + router.get( + "/amis", + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing AWS AMIs request", { + component: "AWSRouter", + operation: "getAMIs", + }); + + try { + const { region } = RegionQuerySchema.parse(req.query); + const search = typeof req.query.search === "string" ? req.query.search.trim() : ""; + + // Build filters: if search is provided, filter by name wildcard + const filters = search + ? 
[{ name: "name", values: [`*${search}*`] }] + : undefined; + + const amis = await awsPlugin.getAMIs(region, filters); + res.status(200).json({ amis }); + } catch (error) { + if (error instanceof ZodError) { + sendValidationError(res, error); + return; + } + + logger.error("AWS AMIs request failed", { + component: "AWSRouter", + operation: "getAMIs", + }, error instanceof Error ? error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve AWS AMIs", + }, + }); + } + }) + ); + + /** + * GET /api/integrations/aws/vpcs + * List VPCs by region + * + * Permission: aws:read + */ + router.get( + "/vpcs", + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing AWS VPCs request", { + component: "AWSRouter", + operation: "getVPCs", + }); + + try { + const { region } = RegionQuerySchema.parse(req.query); + const vpcs = await awsPlugin.getVPCs(region); + res.status(200).json({ vpcs }); + } catch (error) { + if (error instanceof ZodError) { + sendValidationError(res, error); + return; + } + + logger.error("AWS VPCs request failed", { + component: "AWSRouter", + operation: "getVPCs", + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve AWS VPCs", + }, + }); + } + }) + ); + + /** + * GET /api/integrations/aws/subnets + * List subnets by region (optional vpcId filter) + * + * Permission: aws:read + */ + router.get( + "/subnets", + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing AWS subnets request", { + component: "AWSRouter", + operation: "getSubnets", + }); + + try { + const { region, vpcId } = RegionVpcQuerySchema.parse(req.query); + const subnets = await awsPlugin.getSubnets(region, vpcId); + res.status(200).json({ subnets }); + } catch (error) { + if (error instanceof ZodError) { + sendValidationError(res, error); + return; + } + + logger.error("AWS subnets request failed", { + component: "AWSRouter", + operation: "getSubnets", + }, error instanceof Error ? error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve AWS subnets", + }, + }); + } + }) + ); + + /** + * GET /api/integrations/aws/security-groups + * List security groups by region (optional vpcId filter) + * + * Permission: aws:read + */ + router.get( + "/security-groups", + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing AWS security groups request", { + component: "AWSRouter", + operation: "getSecurityGroups", + }); + + try { + const { region, vpcId } = RegionVpcQuerySchema.parse(req.query); + const securityGroups = await awsPlugin.getSecurityGroups(region, vpcId); + res.status(200).json({ securityGroups }); + } catch (error) { + if (error instanceof ZodError) { + sendValidationError(res, error); + return; + } + + logger.error("AWS security groups request failed", { + component: "AWSRouter", + operation: "getSecurityGroups", + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve AWS security groups", + }, + }); + } + }) + ); + + /** + * GET /api/integrations/aws/key-pairs + * List key pairs by region + * + * Permission: aws:read + */ + router.get( + "/key-pairs", + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing AWS key pairs request", { + component: "AWSRouter", + operation: "getKeyPairs", + }); + + try { + const { region } = RegionQuerySchema.parse(req.query); + const keyPairs = await awsPlugin.getKeyPairs(region); + res.status(200).json({ keyPairs }); + } catch (error) { + if (error instanceof ZodError) { + sendValidationError(res, error); + return; + } + + logger.error("AWS key pairs request failed", { + component: "AWSRouter", + operation: "getKeyPairs", + }, error instanceof Error ? error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve AWS key pairs", + }, + }); + } + }) + ); + + return router; +} diff --git a/backend/src/routes/integrations/provisioning.ts b/backend/src/routes/integrations/provisioning.ts new file mode 100644 index 00000000..85624cf5 --- /dev/null +++ b/backend/src/routes/integrations/provisioning.ts @@ -0,0 +1,118 @@ +import { Router, type Request, type Response } from "express"; +import type { IntegrationManager } from "../../integrations/IntegrationManager"; +import type { ProxmoxIntegration } from "../../integrations/proxmox/ProxmoxIntegration"; +import type { AWSPlugin } from "../../integrations/aws/AWSPlugin"; +import type { ProvisioningCapability } from "../../integrations/types"; +import { asyncHandler } from "../asyncHandler"; +import { createLogger } from "./utils"; + +interface ProvisioningIntegration { + name: string; + displayName: string; + type: 'virtualization' | 'cloud' | 'container'; + status: 'connected' | 'degraded' | 'not_configured'; + 
capabilities: ProvisioningCapability[]; +} + +interface ListIntegrationsResponse { + integrations: ProvisioningIntegration[]; +} + +/** + * Create provisioning router for integration discovery + * Validates Requirements: 2.1, 2.2, 13.1, 13.3 + */ +export function createProvisioningRouter( + integrationManager: IntegrationManager +): Router { + const router = Router(); + const logger = createLogger(); + + /** + * GET /api/integrations/provisioning + * List all available provisioning integrations with their capabilities + * Validates Requirements: 2.1, 2.2 + */ + router.get( + "/", + asyncHandler(async (_req: Request, res: Response): Promise => { + logger.info("Fetching provisioning integrations", { + component: "ProvisioningRouter", + operation: "listIntegrations", + }); + + const integrations: ProvisioningIntegration[] = []; + + // Check Proxmox integration + const proxmox = integrationManager.getExecutionTool("proxmox") as ProxmoxIntegration | null; + + if (proxmox) { + // Determine integration status based on health check + let status: 'connected' | 'degraded' | 'not_configured' = 'not_configured'; + const healthCheck = proxmox.getLastHealthCheck(); + + if (healthCheck) { + if (healthCheck.healthy) { + status = 'connected'; + } else if (healthCheck.message?.includes('not initialized') || healthCheck.message?.includes('disabled')) { + status = 'not_configured'; + } else { + status = 'degraded'; + } + } + + const proxmoxIntegration: ProvisioningIntegration = { + name: "proxmox", + displayName: "Proxmox VE", + type: "virtualization", + status, + capabilities: proxmox.listProvisioningCapabilities(), + }; + + integrations.push(proxmoxIntegration); + } + + // Check AWS integration + const aws = integrationManager.getExecutionTool("aws") as AWSPlugin | null; + + if (aws) { + let awsStatus: 'connected' | 'degraded' | 'not_configured' = 'not_configured'; + const awsHealthCheck = aws.getLastHealthCheck(); + + if (awsHealthCheck) { + if (awsHealthCheck.healthy) { + 
awsStatus = 'connected'; + } else if (awsHealthCheck.message?.includes('not initialized') || awsHealthCheck.message?.includes('disabled')) { + awsStatus = 'not_configured'; + } else { + awsStatus = 'degraded'; + } + } + + const awsIntegration: ProvisioningIntegration = { + name: "aws", + displayName: "Amazon Web Services", + type: "cloud", + status: awsStatus, + capabilities: aws.listProvisioningCapabilities(), + }; + + integrations.push(awsIntegration); + } + + const response: ListIntegrationsResponse = { + integrations, + }; + + logger.info("Provisioning integrations fetched", { + component: "ProvisioningRouter", + operation: "listIntegrations", + metadata: { count: integrations.length }, + }); + + res.status(200).json(response); + }) + ); + + return router; +} diff --git a/backend/src/routes/integrations/proxmox.ts b/backend/src/routes/integrations/proxmox.ts new file mode 100644 index 00000000..773fd0a2 --- /dev/null +++ b/backend/src/routes/integrations/proxmox.ts @@ -0,0 +1,1074 @@ +import { Router, type Request, type Response } from "express"; +import { z } from "zod"; +import type { IntegrationManager } from "../../integrations/IntegrationManager"; +import type { ProxmoxIntegration } from "../../integrations/proxmox/ProxmoxIntegration"; +import type { VMCreateParams, LXCCreateParams } from "../../integrations/proxmox/types"; +import { asyncHandler } from "../asyncHandler"; +import { ExpertModeService } from "../../services/ExpertModeService"; +import { createLogger } from "./utils"; + +/** + * Validation schemas for Proxmox API routes + */ + +// VM creation parameters schema +const VMCreateParamsSchema = z.object({ + vmid: z.number().int().min(100).max(999999999), + name: z.string().min(1).max(50), + node: z.string().min(1).max(20), + cores: z.number().int().min(1).max(128).optional(), + memory: z.number().int().min(16).optional(), + sockets: z.number().int().min(1).max(4).optional(), + cpu: z.string().optional(), + scsi0: z.string().optional(), + ide2: 
z.string().optional(), + net0: z.string().optional(), + ostype: z.string().optional(), +}); + +// LXC creation parameters schema +const LXCCreateParamsSchema = z.object({ + vmid: z.number().int().min(100).max(999999999), + hostname: z.string().min(1).max(50), + node: z.string().min(1).max(20), + ostemplate: z.string().min(1), + cores: z.number().int().min(1).max(128).optional(), + memory: z.number().int().min(16).optional(), + rootfs: z.string().optional(), + net0: z.string().optional(), + password: z.string().optional(), +}); + +// Action parameters schema +const ActionParamsSchema = z.object({ + nodeId: z.string().regex(/^proxmox:[^:]+:\d+$/), + action: z.enum(["start", "stop", "shutdown", "reboot", "suspend", "resume"]), +}); + +// Destroy parameters schema +const DestroyParamsSchema = z.object({ + vmid: z.string().regex(/^\d+$/), +}); + +/** + * Create Proxmox router for all Proxmox-related routes + */ +export function createProxmoxRouter( + integrationManager: IntegrationManager, + options?: { allowDestructiveActions?: boolean }, +): Router { + const router = Router(); + const logger = createLogger(); + + /** + * Helper function to get Proxmox integration + */ + const getProxmoxIntegration = (): ProxmoxIntegration | null => { + const plugin = integrationManager.getExecutionTool("proxmox"); + return plugin as ProxmoxIntegration | null; + }; + + /** + * GET /api/integrations/proxmox/nodes + * Get list of PVE nodes in the cluster + */ + router.get( + "/nodes", + asyncHandler(async (_req: Request, res: Response): Promise => { + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + res.status(503).json({ error: { code: "PROXMOX_NOT_CONFIGURED", message: "Proxmox integration is not configured" } }); + return; + } + try { + const nodes = await proxmox.getNodes(); + res.json({ nodes }); + } catch (error) { + const msg = error instanceof Error ? 
error.message : String(error); + logger.error("Failed to fetch PVE nodes", { component: "ProxmoxRouter", operation: "getNodes", metadata: { error: msg } }, error instanceof Error ? error : undefined); + res.status(500).json({ error: { code: "FETCH_NODES_FAILED", message: msg } }); + } + }) + ); + + /** + * GET /api/integrations/proxmox/nextid + * Get the next available VMID + */ + router.get( + "/nextid", + asyncHandler(async (_req: Request, res: Response): Promise => { + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + res.status(503).json({ error: { code: "PROXMOX_NOT_CONFIGURED", message: "Proxmox integration is not configured" } }); + return; + } + try { + const vmid = await proxmox.getNextVMID(); + res.json({ vmid }); + } catch (error) { + const msg = error instanceof Error ? error.message : String(error); + logger.error("Failed to fetch next VMID", { component: "ProxmoxRouter", operation: "getNextVMID", metadata: { error: msg } }, error instanceof Error ? error : undefined); + res.status(500).json({ error: { code: "FETCH_NEXTID_FAILED", message: msg } }); + } + }) + ); + + /** + * GET /api/integrations/proxmox/nodes/:node/isos + * Get ISO images available on a node + * Query params: storage (optional, defaults to 'local') + */ + router.get( + "/nodes/:node/isos", + asyncHandler(async (req: Request, res: Response): Promise => { + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + res.status(503).json({ error: { code: "PROXMOX_NOT_CONFIGURED", message: "Proxmox integration is not configured" } }); + return; + } + const { node } = req.params; + const storage = (req.query.storage as string) || undefined; + try { + const isos = await proxmox.getISOImages(node, storage); + res.json({ isos }); + } catch (error) { + const msg = error instanceof Error ? error.message : String(error); + logger.error("Failed to fetch ISOs", { component: "ProxmoxRouter", operation: "getISOImages", metadata: { node, error: msg } }, error instanceof Error ? 
error : undefined); + res.status(500).json({ error: { code: "FETCH_ISOS_FAILED", message: msg } }); + } + }) + ); + + /** + * GET /api/integrations/proxmox/nodes/:node/templates + * Get OS templates available on a node + * Query params: storage (optional, defaults to 'local') + */ + router.get( + "/nodes/:node/templates", + asyncHandler(async (req: Request, res: Response): Promise => { + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + res.status(503).json({ error: { code: "PROXMOX_NOT_CONFIGURED", message: "Proxmox integration is not configured" } }); + return; + } + const { node } = req.params; + const storage = (req.query.storage as string) || undefined; + try { + const templates = await proxmox.getTemplates(node, storage); + res.json({ templates }); + } catch (error) { + const msg = error instanceof Error ? error.message : String(error); + logger.error("Failed to fetch templates", { component: "ProxmoxRouter", operation: "getTemplates", metadata: { node, error: msg } }, error instanceof Error ? error : undefined); + res.status(500).json({ error: { code: "FETCH_TEMPLATES_FAILED", message: msg } }); + } + }) + ); + + /** + * GET /api/integrations/proxmox/nodes/:node/storages + * Get available storages on a node, optionally filtered by content type + */ + router.get( + "/nodes/:node/storages", + asyncHandler(async (req: Request, res: Response): Promise => { + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + res.status(503).json({ error: { code: "PROXMOX_NOT_CONFIGURED", message: "Proxmox integration is not configured" } }); + return; + } + const { node } = req.params; + const content = (req.query.content as string) || undefined; + try { + const storages = await proxmox.getStorages(node, content); + res.json({ storages }); + } catch (error) { + const msg = error instanceof Error ? 
error.message : String(error); + logger.error("Failed to fetch storages", { component: "ProxmoxRouter", operation: "getStorages", metadata: { node, error: msg } }, error instanceof Error ? error : undefined); + res.status(500).json({ error: { code: "FETCH_STORAGES_FAILED", message: msg } }); + } + }) + ); + + /** + * GET /api/integrations/proxmox/nodes/:node/networks + * Get available network bridges on a node + */ + router.get( + "/nodes/:node/networks", + asyncHandler(async (req: Request, res: Response): Promise => { + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + res.status(503).json({ error: { code: "PROXMOX_NOT_CONFIGURED", message: "Proxmox integration is not configured" } }); + return; + } + const { node } = req.params; + const type = (req.query.type as string) || undefined; + try { + const networks = await proxmox.getNetworkBridges(node, type); + res.json({ networks }); + } catch (error) { + const msg = error instanceof Error ? error.message : String(error); + logger.error("Failed to fetch networks", { component: "ProxmoxRouter", operation: "getNetworks", metadata: { node, error: msg } }, error instanceof Error ? error : undefined); + res.status(500).json({ error: { code: "FETCH_NETWORKS_FAILED", message: msg } }); + } + }) + ); + + /** + * POST /api/integrations/proxmox/provision/vm + * Create a new virtual machine + */ + router.post( + "/provision/vm", + asyncHandler(async (req: Request, res: Response): Promise => { + const startTime = Date.now(); + const expertModeService = new ExpertModeService(); + const requestId = req.id ?? expertModeService.generateRequestId(); + + // Create debug info once at the start if expert mode is enabled + const debugInfo = req.expertMode + ? 
expertModeService.createDebugInfo( + "POST /api/integrations/proxmox/provision/vm", + requestId, + 0 + ) + : null; + + logger.info("Creating Proxmox VM", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createVM", + }); + + if (debugInfo) { + expertModeService.addInfo(debugInfo, { + message: "Creating Proxmox VM", + level: "info", + }); + } + + // Get Proxmox integration + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + logger.warn("Proxmox integration is not configured", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createVM", + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Proxmox integration is not configured", + context: "Proxmox integration is not available", + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "PROXMOX_NOT_CONFIGURED", + message: "Proxmox integration is not configured", + }, + }; + + res + .status(503) + .json( + debugInfo + ? 
expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + // Validate request body + const validation = VMCreateParamsSchema.safeParse(req.body); + if (!validation.success) { + logger.warn("Invalid VM creation parameters", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createVM", + metadata: { errors: validation.error.errors }, + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Invalid VM creation parameters", + context: JSON.stringify(validation.error.errors), + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "INVALID_PARAMETERS", + message: "Invalid VM creation parameters", + details: validation.error.errors, + }, + }; + + res + .status(400) + .json( + debugInfo + ? expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + const params = validation.data as VMCreateParams; + + if (debugInfo) { + expertModeService.addDebug(debugInfo, { + message: "VM creation parameters validated", + context: JSON.stringify({ vmid: params.vmid, node: params.node }), + level: "debug", + }); + } + + try { + // Execute VM creation through integration + const result = await proxmox.executeAction({ + type: "task", + target: `proxmox:${params.node}:${params.vmid}`, + action: "create_vm", + parameters: params, + }); + + logger.info("VM creation completed", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createVM", + metadata: { vmid: params.vmid, status: result.status }, + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addInfo(debugInfo, { + message: "VM creation completed", + context: JSON.stringify({ status: result.status }), + level: "info", + }); + 
debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const responseData = { result }; + + res + .status(result.status === "success" ? 201 : 500) + .json( + debugInfo + ? expertModeService.attachDebugInfo(responseData, debugInfo) + : responseData + ); + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + logger.error( + "Failed to create VM", + { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createVM", + metadata: { vmid: params.vmid, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addError(debugInfo, { + message: "Failed to create VM", + context: errorMessage, + level: "error", + stack: error instanceof Error ? error.stack : undefined, + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "VM_CREATION_FAILED", + message: errorMessage, + }, + }; + + res + .status(500) + .json( + debugInfo + ? expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + } + }) + ); + + /** + * POST /api/integrations/proxmox/provision/lxc + * Create a new LXC container + */ + router.post( + "/provision/lxc", + asyncHandler(async (req: Request, res: Response): Promise => { + const startTime = Date.now(); + const expertModeService = new ExpertModeService(); + const requestId = req.id ?? expertModeService.generateRequestId(); + + const debugInfo = req.expertMode + ? 
expertModeService.createDebugInfo( + "POST /api/integrations/proxmox/provision/lxc", + requestId, + 0 + ) + : null; + + logger.info("Creating Proxmox LXC container", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createLXC", + }); + + if (debugInfo) { + expertModeService.addInfo(debugInfo, { + message: "Creating Proxmox LXC container", + level: "info", + }); + } + + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + logger.warn("Proxmox integration is not configured", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createLXC", + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Proxmox integration is not configured", + context: "Proxmox integration is not available", + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "PROXMOX_NOT_CONFIGURED", + message: "Proxmox integration is not configured", + }, + }; + + res + .status(503) + .json( + debugInfo + ? 
expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + const validation = LXCCreateParamsSchema.safeParse(req.body); + if (!validation.success) { + logger.warn("Invalid LXC creation parameters", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createLXC", + metadata: { errors: validation.error.errors }, + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Invalid LXC creation parameters", + context: JSON.stringify(validation.error.errors), + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "INVALID_PARAMETERS", + message: "Invalid LXC creation parameters", + details: validation.error.errors, + }, + }; + + res + .status(400) + .json( + debugInfo + ? expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + const params = validation.data as LXCCreateParams; + + if (debugInfo) { + expertModeService.addDebug(debugInfo, { + message: "LXC creation parameters validated", + context: JSON.stringify({ vmid: params.vmid, node: params.node }), + level: "debug", + }); + } + + try { + const result = await proxmox.executeAction({ + type: "task", + target: `proxmox:${params.node}:${params.vmid}`, + action: "create_lxc", + parameters: params, + }); + + logger.info("LXC creation completed", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createLXC", + metadata: { vmid: params.vmid, status: result.status }, + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addInfo(debugInfo, { + message: "LXC creation completed", + context: JSON.stringify({ status: result.status }), + level: "info", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + 
debugInfo.context = expertModeService.collectRequestContext(req); + } + + const responseData = { result }; + + res + .status(result.status === "success" ? 201 : 500) + .json( + debugInfo + ? expertModeService.attachDebugInfo(responseData, debugInfo) + : responseData + ); + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + logger.error( + "Failed to create LXC container", + { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "createLXC", + metadata: { vmid: params.vmid, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addError(debugInfo, { + message: "Failed to create LXC container", + context: errorMessage, + level: "error", + stack: error instanceof Error ? error.stack : undefined, + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "LXC_CREATION_FAILED", + message: errorMessage, + }, + }; + + res + .status(500) + .json( + debugInfo + ? expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + } + }) + ); + + /** + * DELETE /api/integrations/proxmox/provision/:vmid + * Destroy a VM or LXC container + */ + router.delete( + "/provision/:vmid", + asyncHandler(async (req: Request, res: Response): Promise => { + // Guard: reject if destructive provisioning actions are disabled + if (options?.allowDestructiveActions === false) { + res.status(403).json({ + error: { + code: "DESTRUCTIVE_ACTION_DISABLED", + message: "Destructive provisioning actions are disabled by configuration (ALLOW_DESTRUCTIVE_PROVISIONING=false)", + }, + }); + return; + } + + const startTime = Date.now(); + const expertModeService = new ExpertModeService(); + const requestId = req.id ?? 
expertModeService.generateRequestId(); + + const debugInfo = req.expertMode + ? expertModeService.createDebugInfo( + "DELETE /api/integrations/proxmox/provision/:vmid", + requestId, + 0 + ) + : null; + + logger.info("Destroying Proxmox guest", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "destroyGuest", + }); + + if (debugInfo) { + expertModeService.addInfo(debugInfo, { + message: "Destroying Proxmox guest", + level: "info", + }); + } + + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + logger.warn("Proxmox integration is not configured", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "destroyGuest", + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Proxmox integration is not configured", + context: "Proxmox integration is not available", + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "PROXMOX_NOT_CONFIGURED", + message: "Proxmox integration is not configured", + }, + }; + + res + .status(503) + .json( + debugInfo + ? 
expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + // Validate vmid parameter + const validation = DestroyParamsSchema.safeParse(req.params); + if (!validation.success) { + logger.warn("Invalid VMID parameter", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "destroyGuest", + metadata: { errors: validation.error.errors }, + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Invalid VMID parameter", + context: JSON.stringify(validation.error.errors), + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "INVALID_PARAMETERS", + message: "Invalid VMID parameter", + details: validation.error.errors, + }, + }; + + res + .status(400) + .json( + debugInfo + ? expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + // Get node from query parameter (required) + const node = req.query.node as string; + if (!node) { + logger.warn("Missing node parameter", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "destroyGuest", + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Missing node parameter", + context: "Node parameter is required", + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "INVALID_PARAMETERS", + message: "Node parameter is required", + }, + }; + + res + .status(400) + .json( + debugInfo + ? 
expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + const vmid = parseInt(validation.data.vmid, 10); + + if (debugInfo) { + expertModeService.addDebug(debugInfo, { + message: "Destroy parameters validated", + context: JSON.stringify({ vmid, node }), + level: "debug", + }); + } + + try { + const result = await proxmox.executeAction({ + type: "task", + target: `proxmox:${node}:${vmid}`, + action: "destroy_vm", + parameters: { vmid, node }, + }); + + logger.info("Guest destruction completed", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "destroyGuest", + metadata: { vmid, node, status: result.status }, + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addInfo(debugInfo, { + message: "Guest destruction completed", + context: JSON.stringify({ status: result.status }), + level: "info", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const responseData = { result }; + + res + .status(result.status === "success" ? 200 : 500) + .json( + debugInfo + ? expertModeService.attachDebugInfo(responseData, debugInfo) + : responseData + ); + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + logger.error( + "Failed to destroy guest", + { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "destroyGuest", + metadata: { vmid, node, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addError(debugInfo, { + message: "Failed to destroy guest", + context: errorMessage, + level: "error", + stack: error instanceof Error ? 
error.stack : undefined, + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "GUEST_DESTRUCTION_FAILED", + message: errorMessage, + }, + }; + + res + .status(500) + .json( + debugInfo + ? expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + } + }) + ); + + /** + * POST /api/integrations/proxmox/action + * Execute a lifecycle action on a VM or container + */ + router.post( + "/action", + asyncHandler(async (req: Request, res: Response): Promise => { + const startTime = Date.now(); + const expertModeService = new ExpertModeService(); + const requestId = req.id ?? expertModeService.generateRequestId(); + + const debugInfo = req.expertMode + ? expertModeService.createDebugInfo( + "POST /api/integrations/proxmox/action", + requestId, + 0 + ) + : null; + + logger.info("Executing Proxmox action", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "executeAction", + }); + + if (debugInfo) { + expertModeService.addInfo(debugInfo, { + message: "Executing Proxmox action", + level: "info", + }); + } + + const proxmox = getProxmoxIntegration(); + if (!proxmox) { + logger.warn("Proxmox integration is not configured", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "executeAction", + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Proxmox integration is not configured", + context: "Proxmox integration is not available", + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "PROXMOX_NOT_CONFIGURED", + message: "Proxmox integration is not configured", + }, + }; + + res + .status(503) + .json( + debugInfo + ? 
expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + // Validate request body + const validation = ActionParamsSchema.safeParse(req.body); + if (!validation.success) { + logger.warn("Invalid action parameters", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "executeAction", + metadata: { errors: validation.error.errors }, + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addWarning(debugInfo, { + message: "Invalid action parameters", + context: JSON.stringify(validation.error.errors), + level: "warn", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "INVALID_PARAMETERS", + message: "Invalid action parameters", + details: validation.error.errors, + }, + }; + + res + .status(400) + .json( + debugInfo + ? expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + return; + } + + const { nodeId, action } = validation.data; + + if (debugInfo) { + expertModeService.addDebug(debugInfo, { + message: "Action parameters validated", + context: JSON.stringify({ nodeId, action }), + level: "debug", + }); + } + + try { + const result = await proxmox.executeAction({ + type: "task", + target: nodeId, + action, + }); + + logger.info("Action execution completed", { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "executeAction", + metadata: { nodeId, action, status: result.status }, + }); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addInfo(debugInfo, { + message: "Action execution completed", + context: JSON.stringify({ status: result.status }), + level: "info", + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const 
responseData = { result }; + + res + .status(result.status === "success" ? 200 : 500) + .json( + debugInfo + ? expertModeService.attachDebugInfo(responseData, debugInfo) + : responseData + ); + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + + logger.error( + "Failed to execute action", + { + component: "ProxmoxRouter", + integration: "proxmox", + operation: "executeAction", + metadata: { nodeId, action, error: errorMessage }, + }, + error instanceof Error ? error : undefined + ); + + if (debugInfo) { + debugInfo.duration = Date.now() - startTime; + expertModeService.addError(debugInfo, { + message: "Failed to execute action", + context: errorMessage, + level: "error", + stack: error instanceof Error ? error.stack : undefined, + }); + debugInfo.performance = + expertModeService.collectPerformanceMetrics(); + debugInfo.context = expertModeService.collectRequestContext(req); + } + + const errorResponse = { + error: { + code: "ACTION_EXECUTION_FAILED", + message: errorMessage, + }, + }; + + res + .status(500) + .json( + debugInfo + ? 
expertModeService.attachDebugInfo(errorResponse, debugInfo) + : errorResponse + ); + } + }) + ); + + return router; +} diff --git a/backend/src/routes/integrations/status.ts b/backend/src/routes/integrations/status.ts index 17bd1097..01ddc787 100644 --- a/backend/src/routes/integrations/status.ts +++ b/backend/src/routes/integrations/status.ts @@ -214,6 +214,50 @@ export function createStatusRouter( }); } + // Check if Proxmox is not configured + if (!configuredNames.has("proxmox")) { + logger.debug("Proxmox integration is not configured", { + component: "StatusRouter", + integration: "proxmox", + operation: "getStatus", + }); + integrations.push({ + name: "proxmox", + type: "both", + status: "not_configured", + lastCheck: new Date().toISOString(), + message: "Proxmox integration is not configured", + details: { + setupRequired: true, + setupUrl: "/setup/proxmox", + }, + workingCapabilities: undefined, + failingCapabilities: undefined, + }); + } + + // Check if AWS is not configured + if (!configuredNames.has("aws")) { + logger.debug("AWS integration is not configured", { + component: "StatusRouter", + integration: "aws", + operation: "getStatus", + }); + integrations.push({ + name: "aws", + type: "both", + status: "not_configured", + lastCheck: new Date().toISOString(), + message: "AWS integration is not configured", + details: { + setupRequired: true, + setupUrl: "/setup/aws", + }, + workingCapabilities: undefined, + failingCapabilities: undefined, + }); + } + const duration = Date.now() - startTime; const responseData = { integrations, diff --git a/backend/src/routes/inventory.ts b/backend/src/routes/inventory.ts index a5b172e8..6b74fa79 100644 --- a/backend/src/routes/inventory.ts +++ b/backend/src/routes/inventory.ts @@ -27,6 +27,7 @@ const InventoryQuerySchema = z.object({ export function createInventoryRouter( boltService: BoltService, integrationManager?: IntegrationManager, + options?: { allowDestructiveActions?: boolean }, ): Router { const router = 
Router(); const logger = new LoggerService(); @@ -110,14 +111,21 @@ export function createInventoryRouter( if (!requestedSources.includes("all")) { filteredNodes = aggregated.nodes.filter((node) => { - const nodeSource = (node as { source?: string }).source ?? "bolt"; - return requestedSources.includes(nodeSource); + // Check both 'sources' (plural, from linked nodes) and 'source' (singular, from single-source nodes) + const linkedNode = node as { source?: string; sources?: string[] }; + const nodeSources = linkedNode.sources && linkedNode.sources.length > 0 + ? linkedNode.sources + : [linkedNode.source ?? "bolt"]; + return nodeSources.some((s) => requestedSources.includes(s)); }); // Apply same source filtering to groups filteredGroups = aggregated.groups.filter((group) => { - const groupSource = (group as { source?: string }).source ?? "bolt"; - return requestedSources.includes(groupSource); + const linkedGroup = group as { source?: string; sources?: string[] }; + const groupSources = linkedGroup.sources && linkedGroup.sources.length > 0 + ? linkedGroup.sources + : [linkedGroup.source ?? "bolt"]; + return groupSources.some((s) => requestedSources.includes(s)); }); logger.debug("Filtered nodes and groups by source", { @@ -1115,5 +1123,373 @@ export function createInventoryRouter( }), ); + /** + * Map action names to the node states where they are available. + * This provides sensible defaults; the frontend can further refine. 
+ */ + function getAvailableWhen(actionName: string): string[] { + const mapping: Record = { + start: ["stopped"], + stop: ["running", "paused"], + shutdown: ["running", "paused"], + reboot: ["running", "paused"], + suspend: ["running"], + resume: ["suspended", "paused"], + snapshot: ["running", "stopped"], + terminate: ["running", "stopped", "suspended", "paused", "unknown"], + destroy: ["stopped", "running", "suspended", "paused", "unknown"], + destroy_vm: ["stopped", "running", "suspended", "paused", "unknown"], + destroy_lxc: ["stopped", "running", "suspended", "paused", "unknown"], + }; + return mapping[actionName] ?? []; + } + + /** + * Resolve the provider name from a node ID prefix. + * Node IDs follow the pattern "{provider}:{...}" (e.g. "proxmox:node:vmid", "aws:region:instanceId"). + * Returns null when the prefix doesn't map to a known integration. + */ + function resolveProvider(nodeId: string): string | null { + const prefix = nodeId.split(":")[0]; + const providerMap: Record = { + proxmox: "proxmox", + aws: "aws", + }; + return providerMap[prefix] ?? null; + } + + /** + * Look up the execution tool for a given node ID. + * Returns the tool and provider name, or sends an error response and returns null. 
+ */ + function getExecutionToolForNode( + nodeId: string, + res: Response, + ): { tool: import("../integrations/types").ExecutionToolPlugin; provider: string } | null { + if (!integrationManager?.isInitialized()) { + res.status(503).json({ + error: { code: "INTEGRATION_NOT_AVAILABLE", message: "Integration manager is not available" }, + }); + return null; + } + + const provider = resolveProvider(nodeId); + if (!provider) { + res.status(400).json({ + error: { + code: "UNSUPPORTED_PROVIDER", + message: `No provisioning provider found for node ID: ${nodeId}`, + }, + }); + return null; + } + + const tool = integrationManager.getExecutionTool(provider); + if (!tool) { + res.status(503).json({ + error: { + code: "PROVIDER_NOT_CONFIGURED", + message: `Integration "${provider}" is not configured`, + }, + }); + return null; + } + + return { tool, provider }; + } + + /** + * GET /api/nodes/:id/lifecycle-actions + * Discover available lifecycle actions for a node based on its provider. + * Returns actions with metadata so the frontend can render them dynamically. 
+ */ + router.get( + "/:id/lifecycle-actions", + asyncHandler(async (req: Request, res: Response): Promise => { + const params = NodeIdParamSchema.parse(req.params); + const nodeId = params.id; + + const resolved = getExecutionToolForNode(nodeId, res); + if (!resolved) return; + + const { tool, provider } = resolved; + const capabilities = tool.listCapabilities(); + + // Build lifecycle action definitions from the provider's capabilities + const actions = capabilities.map((cap) => { + const isDestructive = ["destroy", "terminate", "destroy_vm", "destroy_lxc"].includes(cap.name); + return { + name: cap.name, + displayName: cap.name.charAt(0).toUpperCase() + cap.name.slice(1).replace(/_/g, " "), + description: cap.description, + requiresConfirmation: isDestructive, + destructive: isDestructive, + // Provider-specific availability hints; frontend can refine with node status + availableWhen: getAvailableWhen(cap.name), + }; + }); + + // Add destroy action from provisioning capabilities if not already present + const provisioningTool = tool as unknown as { listProvisioningCapabilities?: () => Array<{ name: string; description: string; operation: string }> }; + if (typeof provisioningTool.listProvisioningCapabilities === "function") { + const provCaps = provisioningTool.listProvisioningCapabilities(); + for (const cap of provCaps) { + if (cap.operation === "destroy" && !actions.some((a) => a.name === cap.name)) { + actions.push({ + name: cap.name, + displayName: cap.name.charAt(0).toUpperCase() + cap.name.slice(1).replace(/_/g, " "), + description: cap.description, + requiresConfirmation: true, + destructive: true, + availableWhen: ["stopped", "running", "suspended", "unknown"], + }); + } + } + } + + logger.info("Lifecycle actions resolved", { + component: "InventoryRouter", + operation: "getLifecycleActions", + metadata: { nodeId, provider, actionCount: actions.length }, + }); + + // Filter out destructive actions when destructive provisioning is disabled + const 
filteredActions = options?.allowDestructiveActions === false + ? actions.filter((a) => !a.destructive) + : actions; + + res.json({ provider, actions: filteredActions }); + }), + ); + + /** + * POST /api/nodes/:id/action + * Execute a lifecycle action on a node via its provider integration. + * Provider-agnostic: routes to the correct integration based on node ID prefix. + * + * Note: RBAC middleware should be applied at the route mounting level in server.ts + * Required permission: lifecycle:* or lifecycle:{action} + */ + router.post( + "/:id/action", + asyncHandler(async (req: Request, res: Response): Promise => { + const startTime = Date.now(); + + logger.info("Executing node action", { + component: "InventoryRouter", + operation: "executeNodeAction", + }); + + try { + const params = NodeIdParamSchema.parse(req.params); + const nodeId = params.id; + + // Accept any action string — the provider will validate it + const ActionSchema = z.object({ + action: z.string().min(1), + parameters: z.record(z.unknown()).optional(), + }); + const body = ActionSchema.parse(req.body); + + // Guard: reject destructive actions when disabled + const destructiveActions = ["destroy", "destroy_vm", "destroy_lxc", "terminate", "terminate_instance"]; + if (destructiveActions.includes(body.action) && options?.allowDestructiveActions === false) { + res.status(403).json({ + error: { + code: "DESTRUCTIVE_ACTION_DISABLED", + message: "Destructive provisioning actions are disabled by configuration (ALLOW_DESTRUCTIVE_PROVISIONING=false)", + }, + }); + return; + } + + const resolved = getExecutionToolForNode(nodeId, res); + if (!resolved) return; + + const { tool, provider } = resolved; + + logger.debug("Executing action on node", { + component: "InventoryRouter", + operation: "executeNodeAction", + metadata: { nodeId, provider, action: body.action }, + }); + + const result = await tool.executeAction({ + type: "task", + target: nodeId, + action: body.action, + parameters: body.parameters, + }); 
+ + const duration = Date.now() - startTime; + + logger.info("Node action executed successfully", { + component: "InventoryRouter", + integration: provider, + operation: "executeNodeAction", + metadata: { nodeId, action: body.action, duration }, + }); + + res.json({ + success: true, + message: `Action ${body.action} executed successfully`, + result, + }); + } catch (error) { + const duration = Date.now() - startTime; + + if (error instanceof z.ZodError) { + logger.warn("Invalid request parameters", { + component: "InventoryRouter", + operation: "executeNodeAction", + metadata: { errors: error.errors }, + }); + + res.status(400).json({ + error: { + code: "INVALID_REQUEST", + message: "Invalid request parameters", + details: error.errors, + }, + }); + return; + } + + logger.error("Error executing node action", { + component: "InventoryRouter", + operation: "executeNodeAction", + metadata: { duration }, + }, error instanceof Error ? error : undefined); + + res.status(500).json({ + error: { + code: "ACTION_EXECUTION_FAILED", + message: error instanceof Error ? error.message : "Failed to execute action", + }, + }); + } + }), + ); + + /** + * DELETE /api/nodes/:id + * Destroy a node (permanently delete VM, container, or cloud instance). + * Provider-agnostic: routes to the correct integration based on node ID prefix. 
+ * + * Note: RBAC middleware should be applied at the route mounting level in server.ts + * Required permission: lifecycle:destroy + */ + router.delete( + "/:id", + asyncHandler(async (req: Request, res: Response): Promise => { + const startTime = Date.now(); + + logger.info("Destroying node", { + component: "InventoryRouter", + operation: "destroyNode", + }); + + try { + const params = NodeIdParamSchema.parse(req.params); + const nodeId = params.id; + + const resolved = getExecutionToolForNode(nodeId, res); + if (!resolved) return; + + const { tool, provider } = resolved; + + logger.debug("Destroying node", { + component: "InventoryRouter", + operation: "destroyNode", + metadata: { nodeId, provider }, + }); + + // Determine the correct destroy action based on provider + let destroyAction: string; + let destroyParams: Record | undefined; + + if (provider === "proxmox") { + const parts = nodeId.split(":"); + if (parts.length !== 3) { + res.status(400).json({ + error: { code: "INVALID_NODE_ID", message: "Invalid Proxmox node ID format" }, + }); + return; + } + const node = parts[1]; + const vmid = parseInt(parts[2], 10); + if (!Number.isFinite(vmid)) { + res.status(400).json({ + error: { code: "INVALID_NODE_ID", message: "Invalid Proxmox node ID: vmid is not a valid number" }, + }); + return; + } + destroyAction = "destroy_vm"; + destroyParams = { node, vmid }; + } else if (provider === "aws") { + destroyAction = "terminate"; + destroyParams = undefined; + } else { + destroyAction = "destroy"; + destroyParams = undefined; + } + + const result = await tool.executeAction({ + type: "task", + target: nodeId, + action: destroyAction, + parameters: destroyParams, + }); + + const duration = Date.now() - startTime; + + logger.info("Node destroyed successfully", { + component: "InventoryRouter", + integration: provider, + operation: "destroyNode", + metadata: { nodeId, duration }, + }); + + res.json({ + success: true, + message: "Node destroyed successfully", + result, + }); 
+ } catch (error) { + const duration = Date.now() - startTime; + + if (error instanceof z.ZodError) { + logger.warn("Invalid request parameters", { + component: "InventoryRouter", + operation: "destroyNode", + metadata: { errors: error.errors }, + }); + + res.status(400).json({ + error: { + code: "INVALID_REQUEST", + message: "Invalid request parameters", + details: error.errors, + }, + }); + return; + } + + logger.error("Error destroying node", { + component: "InventoryRouter", + operation: "destroyNode", + metadata: { duration }, + }, error instanceof Error ? error : undefined); + + res.status(500).json({ + error: { + code: "DESTROY_FAILED", + message: error instanceof Error ? error.message : "Failed to destroy node", + }, + }); + } + }), + ); + return router; } diff --git a/backend/src/routes/journal.ts b/backend/src/routes/journal.ts new file mode 100644 index 00000000..ab7c64ce --- /dev/null +++ b/backend/src/routes/journal.ts @@ -0,0 +1,235 @@ +import { Router, type Request, type Response } from "express"; +import { z } from "zod"; +import { ZodError } from "zod"; +import { asyncHandler } from "./asyncHandler"; +import { JournalService } from "../services/journal/JournalService"; +import type { DatabaseService } from "../database/DatabaseService"; +import { LoggerService } from "../services/LoggerService"; +import { sendValidationError, ERROR_CODES } from "../utils/errorHandling"; +import { createAuthMiddleware } from "../middleware/authMiddleware"; +import { createRbacMiddleware } from "../middleware/rbacMiddleware"; + +const logger = new LoggerService(); + +/** + * Zod schema for timeline query parameters + */ +const TimelineQuerySchema = z.object({ + limit: z.coerce.number().int().min(1).max(200).default(50), + offset: z.coerce.number().int().min(0).default(0), + startDate: z.string().datetime().optional(), + endDate: z.string().datetime().optional(), +}); + +/** + * Zod schema for adding a manual note + */ +const AddNoteSchema = z.object({ + content: 
z.string().min(1, "Note content is required").max(5000), +}); + +/** + * Zod schema for search query parameters + */ +const SearchQuerySchema = z.object({ + q: z.string().min(1, "Search query is required"), + limit: z.coerce.number().int().min(1).max(200).default(50), + offset: z.coerce.number().int().min(0).default(0), +}); + +/** + * Create journal routes + * + * Requirements: 22.4, 23.1, 24.1, 27.3 + */ +export function createJournalRouter(databaseService: DatabaseService): Router { + const router = Router(); + const journalService = new JournalService(databaseService.getConnection()); + const authMiddleware = createAuthMiddleware(databaseService.getConnection()); + const rbacMiddleware = createRbacMiddleware(databaseService.getConnection()); + + /** + * GET /api/journal/search + * Search journal entries across summary and details + * + * Requirements: 24.1 + */ + router.get( + "/search", + asyncHandler(authMiddleware), + asyncHandler(rbacMiddleware("journal", "read")), + asyncHandler(async (req: Request, res: Response): Promise => { + logger.info("Processing journal search request", { + component: "JournalRouter", + operation: "searchEntries", + metadata: { userId: req.user?.userId }, + }); + + try { + const validatedQuery = SearchQuerySchema.parse(req.query); + + const entries = await journalService.searchEntries(validatedQuery.q, { + limit: validatedQuery.limit, + offset: validatedQuery.offset, + }); + + res.status(200).json({ entries }); + } catch (error) { + if (error instanceof ZodError) { + logger.warn("Journal search validation failed", { + component: "JournalRouter", + operation: "searchEntries", + metadata: { errors: error.errors }, + }); + sendValidationError(res, error); + return; + } + + logger.error("Journal search failed", { + component: "JournalRouter", + operation: "searchEntries", + metadata: { userId: req.user?.userId }, + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to search journal entries", + }, + }); + } + }) + ); + + /** + * GET /api/journal/:nodeId + * Get aggregated timeline for a node + * + * Requirements: 22.4, 23.1 + */ + router.get( + "/:nodeId", + asyncHandler(authMiddleware), + asyncHandler(rbacMiddleware("journal", "read")), + asyncHandler(async (req: Request, res: Response): Promise => { + const { nodeId } = req.params; + + logger.info("Processing journal timeline request", { + component: "JournalRouter", + operation: "getTimeline", + metadata: { userId: req.user?.userId, nodeId }, + }); + + try { + const validatedQuery = TimelineQuerySchema.parse(req.query); + + const entries = await journalService.aggregateTimeline(nodeId, { + limit: validatedQuery.limit, + offset: validatedQuery.offset, + startDate: validatedQuery.startDate, + endDate: validatedQuery.endDate, + }); + + res.status(200).json({ entries }); + } catch (error) { + if (error instanceof ZodError) { + logger.warn("Journal timeline validation failed", { + component: "JournalRouter", + operation: "getTimeline", + metadata: { errors: error.errors }, + }); + sendValidationError(res, error); + return; + } + + logger.error("Journal timeline retrieval failed", { + component: "JournalRouter", + operation: "getTimeline", + metadata: { userId: req.user?.userId, nodeId }, + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to retrieve journal timeline", + }, + }); + } + }) + ); + + /** + * POST /api/journal/:nodeId/notes + * Add a manual note to a node's journal + * + * Requirements: 24.1 + */ + router.post( + "/:nodeId/notes", + asyncHandler(authMiddleware), + asyncHandler(rbacMiddleware("journal", "note")), + asyncHandler(async (req: Request, res: Response): Promise => { + const { nodeId } = req.params; + const userId = req.user?.userId; + + logger.info("Processing add journal note request", { + component: "JournalRouter", + operation: "addNote", + metadata: { userId, nodeId }, + }); + + if (!userId) { + res.status(401).json({ + error: { + code: ERROR_CODES.UNAUTHORIZED, + message: "Authentication required", + }, + }); + return; + } + + try { + const validatedBody = AddNoteSchema.parse(req.body); + + const entryId = await journalService.addNote( + nodeId, + userId, + validatedBody.content, + ); + + logger.info("Journal note added successfully", { + component: "JournalRouter", + operation: "addNote", + metadata: { userId, nodeId, entryId }, + }); + + res.status(201).json({ id: entryId }); + } catch (error) { + if (error instanceof ZodError) { + logger.warn("Add note validation failed", { + component: "JournalRouter", + operation: "addNote", + metadata: { errors: error.errors }, + }); + sendValidationError(res, error); + return; + } + + logger.error("Add journal note failed", { + component: "JournalRouter", + operation: "addNote", + metadata: { userId, nodeId }, + }, error instanceof Error ? 
error : undefined); + + res.status(500).json({ + error: { + code: ERROR_CODES.INTERNAL_SERVER_ERROR, + message: "Failed to add journal note", + }, + }); + } + }) + ); + + return router; +} diff --git a/backend/src/routes/roles.ts b/backend/src/routes/roles.ts index 0107e691..5e79ea77 100644 --- a/backend/src/routes/roles.ts +++ b/backend/src/routes/roles.ts @@ -2,6 +2,7 @@ import { Router, type Request, type Response } from "express"; import { z } from "zod"; import { asyncHandler } from "./asyncHandler"; import { RoleService } from "../services/RoleService"; +import { PermissionService } from "../services/PermissionService"; import type { DatabaseService } from "../database/DatabaseService"; import { LoggerService } from "../services/LoggerService"; import { sendValidationError, ERROR_CODES } from "../utils/errorHandling"; @@ -44,6 +45,7 @@ export function createRolesRouter( const router = Router(); const jwtSecret = process.env.JWT_SECRET; const roleService = new RoleService(databaseService.getConnection()); + const permissionService = new PermissionService(databaseService.getConnection()); const authMiddleware = createAuthMiddleware(databaseService.getConnection(), jwtSecret); const rbacMiddleware = createRbacMiddleware(databaseService.getConnection()); @@ -548,6 +550,9 @@ export function createRolesRouter( // Assign permission to role await roleService.assignPermissionToRole(roleId, permissionId); + // Invalidate permission cache for all users affected by this role (Requirement 30.2) + await permissionService.invalidateRolePermissionCache(roleId); + logger.info("Permission assigned to role successfully", { component: "RolesRouter", operation: "assignPermissionToRole", @@ -668,6 +673,9 @@ export function createRolesRouter( // Remove permission from role await roleService.removePermissionFromRole(roleId, permissionId); + // Invalidate permission cache for all users affected by this role (Requirement 30.2) + await 
permissionService.invalidateRolePermissionCache(roleId); + logger.info("Permission removed from role successfully", { component: "RolesRouter", operation: "removePermissionFromRole", diff --git a/backend/src/routes/streaming.ts b/backend/src/routes/streaming.ts index 9fa42eb1..78d5a285 100644 --- a/backend/src/routes/streaming.ts +++ b/backend/src/routes/streaming.ts @@ -26,6 +26,9 @@ export function createStreamingRouter( /** * GET /api/executions/:id/stream * Subscribe to streaming events for an execution + * + * Note: EventSource API doesn't support custom headers, so authentication + * token can be passed via query parameter as a fallback */ router.get( "/:id/stream", @@ -46,6 +49,18 @@ export function createStreamingRouter( }); try { + // Handle token from query parameter (EventSource doesn't support headers) + // Only move to Authorization header when no Authorization header is already present, + // then remove from query to reduce the chance of it being logged downstream. + if ( + typeof req.query.token === "string" && + !req.headers.authorization + ) { + req.headers.authorization = `Bearer ${req.query.token}`; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete (req.query as Record).token; + } + // Validate request parameters if (debugInfo) { expertModeService.addDebug(debugInfo, { diff --git a/backend/src/server.ts b/backend/src/server.ts index 7a008d15..953ffbb6 100644 --- a/backend/src/server.ts +++ b/backend/src/server.ts @@ -28,6 +28,10 @@ import { createUsersRouter } from "./routes/users"; import { createGroupsRouter } from "./routes/groups"; import { createRolesRouter } from "./routes/roles"; import { createPermissionsRouter } from "./routes/permissions"; +import { createJournalRouter } from "./routes/journal"; +import { createIntegrationConfigRouter } from "./routes/integrationConfig"; +import { createAWSRouter } from "./routes/integrations/aws"; +import { AWSPlugin } from "./integrations/aws/AWSPlugin"; import 
monitoringRouter from "./routes/monitoring"; import { StreamingExecutionManager } from "./services/StreamingExecutionManager"; import { ExecutionQueue } from "./services/ExecutionQueue"; @@ -52,10 +56,13 @@ import { AnsibleService } from "./integrations/ansible/AnsibleService"; import { AnsiblePlugin } from "./integrations/ansible/AnsiblePlugin"; import { SSHPlugin } from "./integrations/ssh/SSHPlugin"; import { loadSSHConfig } from "./integrations/ssh/config"; +import { ProxmoxIntegration } from "./integrations/proxmox/ProxmoxIntegration"; import type { IntegrationConfig } from "./integrations/types"; import { LoggerService } from "./services/LoggerService"; import { PerformanceMonitorService } from "./services/PerformanceMonitorService"; import { PuppetRunHistoryService } from "./services/PuppetRunHistoryService"; +import { IntegrationConfigService } from "./services/IntegrationConfigService"; +import { JournalService } from "./services/journal/JournalService"; /** * Initialize and start the application @@ -678,6 +685,203 @@ async function startServer(): Promise { operation: "initializeSSH", }); + // Create IntegrationConfigService early so all plugins can use DB config overrides + const integrationConfigService = new IntegrationConfigService( + databaseService.getConnection(), + process.env.JWT_SECRET ?? 
"default-secret", + (integrationName: string) => { + // Provide .env config as the base for each integration + const integration = config.integrations[integrationName as keyof typeof config.integrations]; + if (!integration) return {}; + return integration as unknown as Record; + }, + ); + logger.info("IntegrationConfigService initialized", { + component: "Server", + operation: "startServer", + }); + + // Initialize Proxmox integration only if configured + let proxmoxPlugin: ProxmoxIntegration | undefined; + const proxmoxConfig = config.integrations.proxmox; + const proxmoxConfigured = proxmoxConfig?.enabled === true; + + logger.debug("=== Proxmox Integration Setup ===", { + component: "Server", + operation: "initializeProxmox", + metadata: { + configured: proxmoxConfigured, + enabled: proxmoxConfig?.enabled, + hasHost: !!proxmoxConfig?.host, + }, + }); + + if (proxmoxConfigured && proxmoxConfig) { + logger.info("Initializing Proxmox integration...", { + component: "Server", + operation: "initializeProxmox", + }); + try { + proxmoxPlugin = new ProxmoxIntegration(logger, performanceMonitor); + logger.debug("ProxmoxIntegration instance created", { + component: "Server", + operation: "initializeProxmox", + }); + + // Merge .env config with DB-stored config (DB overrides .env) + const effectiveProxmoxConfig = await integrationConfigService.getEffectiveConfig("proxmox"); + const mergedProxmoxConfig = { + ...proxmoxConfig, + ...(Object.keys(effectiveProxmoxConfig).length > 0 ? effectiveProxmoxConfig : {}), + }; + + const integrationConfig: IntegrationConfig = { + enabled: true, + name: "proxmox", + type: "both", + config: mergedProxmoxConfig as unknown as Record, + priority: proxmoxConfig.priority ?? 
7, // Default 7: between Bolt/PuppetDB (10) and Hiera (6) + }; + + logger.debug("Registering Proxmox plugin", { + component: "Server", + operation: "initializeProxmox", + metadata: { config: integrationConfig }, + }); + integrationManager.registerPlugin( + proxmoxPlugin, + integrationConfig, + ); + + logger.info("Proxmox integration registered successfully", { + component: "Server", + operation: "initializeProxmox", + metadata: { + enabled: true, + host: proxmoxConfig.host, + port: proxmoxConfig.port ?? 8006, + hasToken: !!proxmoxConfig.token, + hasPassword: !!proxmoxConfig.password, + priority: proxmoxConfig.priority ?? 7, + }, + }); + } catch (error) { + logger.warn(`WARNING: Failed to initialize Proxmox integration: ${error instanceof Error ? error.message : "Unknown error"}`, { + component: "Server", + operation: "initializeProxmox", + }); + if (error instanceof Error && error.stack) { + logger.error("Proxmox initialization error stack", { + component: "Server", + operation: "initializeProxmox", + }, error); + } + proxmoxPlugin = undefined; + } + } else { + logger.warn("Proxmox integration not configured - skipping registration", { + component: "Server", + operation: "initializeProxmox", + }); + logger.info("Set PROXMOX_ENABLED=true and PROXMOX_HOST to enable Proxmox integration", { + component: "Server", + operation: "initializeProxmox", + }); + } + logger.debug("=== End Proxmox Integration Setup ===", { + component: "Server", + operation: "initializeProxmox", + }); + + // Initialize AWS integration only if configured + let awsPlugin: AWSPlugin | undefined; + const awsConfig = config.integrations.aws; + const awsConfigured = awsConfig?.enabled === true; + + logger.debug("=== AWS Integration Setup ===", { + component: "Server", + operation: "initializeAWS", + metadata: { + configured: awsConfigured, + enabled: awsConfig?.enabled, + hasAccessKey: !!awsConfig?.accessKeyId, + region: awsConfig?.region, + }, + }); + + if (awsConfigured && awsConfig) { + 
logger.info("Initializing AWS integration...", { + component: "Server", + operation: "initializeAWS", + }); + try { + awsPlugin = new AWSPlugin(logger, performanceMonitor); + logger.debug("AWSPlugin instance created", { + component: "Server", + operation: "initializeAWS", + }); + + // Merge .env config with DB-stored config (DB overrides .env) + const effectiveAwsConfig = await integrationConfigService.getEffectiveConfig("aws"); + const mergedAwsConfig = { + ...awsConfig, + ...(Object.keys(effectiveAwsConfig).length > 0 ? effectiveAwsConfig : {}), + }; + + const integrationConfig: IntegrationConfig = { + enabled: true, + name: "aws", + type: "both", + config: mergedAwsConfig as unknown as Record, + priority: 7, + }; + + logger.debug("Registering AWS plugin", { + component: "Server", + operation: "initializeAWS", + metadata: { config: { ...integrationConfig, config: { region: mergedAwsConfig.region } } }, + }); + integrationManager.registerPlugin(awsPlugin, integrationConfig); + + logger.info("AWS integration registered successfully", { + component: "Server", + operation: "initializeAWS", + metadata: { + enabled: true, + region: mergedAwsConfig.region ?? "us-east-1", + regions: mergedAwsConfig.regions, + hasAccessKey: !!mergedAwsConfig.accessKeyId, + priority: 7, + }, + }); + } catch (error) { + logger.warn(`WARNING: Failed to initialize AWS integration: ${error instanceof Error ? 
error.message : "Unknown error"}`, { + component: "Server", + operation: "initializeAWS", + }); + if (error instanceof Error && error.stack) { + logger.error("AWS initialization error stack", { + component: "Server", + operation: "initializeAWS", + }, error); + } + awsPlugin = undefined; + } + } else { + logger.warn("AWS integration not configured - skipping registration", { + component: "Server", + operation: "initializeAWS", + }); + logger.info("Set AWS_ENABLED=true and AWS_ACCESS_KEY_ID to enable AWS integration", { + component: "Server", + operation: "initializeAWS", + }); + } + logger.debug("=== End AWS Integration Setup ===", { + component: "Server", + operation: "initializeAWS", + }); + // Initialize all registered plugins logger.info("=== Initializing All Integration Plugins ===", { component: "Server", @@ -746,6 +950,23 @@ async function startServer(): Promise { operation: "initializePlugins", }); + // Create shared JournalService and wire it to plugins + const journalService = new JournalService(databaseService.getConnection()); + if (proxmoxPlugin) { + proxmoxPlugin.setJournalService(journalService); + logger.info("JournalService wired to ProxmoxIntegration", { + component: "Server", + operation: "wireJournalService", + }); + } + if (awsPlugin) { + awsPlugin.setJournalService(journalService); + logger.info("JournalService wired to AWSPlugin", { + component: "Server", + operation: "wireJournalService", + }); + } + // Make integration manager available globally for cross-service access (global as Record).integrationManager = integrationManager; @@ -879,6 +1100,12 @@ async function startServer(): Promise { // Permission management routes app.use("/api/permissions", authMiddleware, rateLimitMiddleware, createPermissionsRouter(databaseService)); + // Journal routes + app.use("/api/journal", authMiddleware, rateLimitMiddleware, createJournalRouter(databaseService)); + + // Integration config routes + app.use("/api/config/integrations", authMiddleware, 
rateLimitMiddleware, createIntegrationConfigRouter(databaseService)); + // Monitoring routes (performance metrics) app.use("/api/monitoring", authMiddleware, rateLimitMiddleware, monitoringRouter); @@ -888,14 +1115,18 @@ async function startServer(): Promise { authMiddleware, rateLimitMiddleware, rbacMiddleware('ansible', 'read'), - createInventoryRouter(boltService, integrationManager), + createInventoryRouter(boltService, integrationManager, { + allowDestructiveActions: config.provisioning.allowDestructiveActions, + }), ); app.use( "/api/nodes", authMiddleware, rateLimitMiddleware, rbacMiddleware('ansible', 'read'), - createInventoryRouter(boltService, integrationManager), + createInventoryRouter(boltService, integrationManager, { + allowDestructiveActions: config.provisioning.allowDestructiveActions, + }), ); app.use( "/api/nodes", @@ -1019,6 +1250,7 @@ async function startServer(): Promise { puppetserverService, databaseService.getConnection(), undefined, // JWT secret is read from environment by AuthenticationService + { allowDestructiveProvisioning: config.provisioning.allowDestructiveActions }, ), ); app.use( @@ -1027,6 +1259,20 @@ async function startServer(): Promise { rateLimitMiddleware, createHieraRouter(integrationManager), ); + + // AWS integration routes (conditional on plugin availability) + const awsPluginInstance = integrationManager.getExecutionTool("aws") as AWSPlugin | null; + if (awsPluginInstance) { + app.use( + "/api/integrations/aws", + authMiddleware, + rateLimitMiddleware, + createAWSRouter(awsPluginInstance, integrationManager, { + allowDestructiveActions: config.provisioning.allowDestructiveActions, + }), + ); + } + app.use( "/api/debug", authMiddleware, diff --git a/backend/src/services/AuditLoggingService.ts b/backend/src/services/AuditLoggingService.ts index 6d115a70..79548223 100644 --- a/backend/src/services/AuditLoggingService.ts +++ b/backend/src/services/AuditLoggingService.ts @@ -1,4 +1,4 @@ -import type { Database } from 
'sqlite3'; +import type { DatabaseAdapter } from '../database/DatabaseAdapter'; import { randomUUID } from 'crypto'; /** @@ -56,6 +56,7 @@ export enum AuditAction { GROUP_MEMBER_REMOVED = 'group_member_removed' } + /** * Result of an audited action */ @@ -136,9 +137,9 @@ interface AuditStatisticsRow { * Requirements: 13.1, 13.2, 13.3, 13.4, 13.6, 13.7 */ export class AuditLoggingService { - private db: Database; + private db: DatabaseAdapter; - constructor(db: Database) { + constructor(db: DatabaseAdapter) { this.db = db; } @@ -379,7 +380,7 @@ export class AuditLoggingService { entry.result ]; - await this.runQuery(sql, params); + await this.db.execute(sql, params); } /** @@ -437,7 +438,7 @@ export class AuditLoggingService { params.push(filters.offset); } - const rows = await this.allQuery(sql, params); + const rows = await this.db.query(sql, params); return rows.map(row => ({ id: row.id, @@ -520,16 +521,8 @@ export class AuditLoggingService { const cutoffTimestamp = cutoffDate.toISOString(); const sql = 'DELETE FROM audit_logs WHERE timestamp < ?'; - - return new Promise((resolve, reject) => { - this.db.run(sql, [cutoffTimestamp], function(err) { - if (err) { - reject(err); - } else { - resolve(this.changes); - } - }); - }); + const result = await this.db.execute(sql, [cutoffTimestamp]); + return result.changes; } /** @@ -566,7 +559,7 @@ export class AuditLoggingService { WHERE ${whereClause} `; - const result = await this.getQuery(sql, params); + const result = await this.db.queryOne(sql, params); return { totalLogs: result?.totalLogs ?? 0, @@ -576,32 +569,4 @@ export class AuditLoggingService { adminActions: result?.adminActions ?? 
0 }; } - - // Database helper methods - private runQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - } - - private getQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.get(sql, params, (err, row) => { - if (err) reject(err); - else resolve(row as T || null); - }); - }); - } - - private allQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.all(sql, params, (err, rows) => { - if (err) reject(err); - else resolve(rows as T[]); - }); - }); - } } diff --git a/backend/src/services/AuthenticationService.ts b/backend/src/services/AuthenticationService.ts index 71ed8018..2dbf9efb 100644 --- a/backend/src/services/AuthenticationService.ts +++ b/backend/src/services/AuthenticationService.ts @@ -1,6 +1,6 @@ import bcrypt from 'bcrypt'; import jwt from 'jsonwebtoken'; -import type { Database } from 'sqlite3'; +import type { DatabaseAdapter } from '../database/DatabaseAdapter'; import crypto from 'crypto'; import type { AuditLoggingService } from './AuditLoggingService'; import { performanceMonitor } from './PerformanceMonitor'; @@ -94,14 +94,14 @@ interface DecodedTokenPayload { * - Enforce password policies */ export class AuthenticationService { - private db: Database; + private db: DatabaseAdapter; private jwtSecret: string; private accessTokenLifetime = 3600; // 1 hour in seconds private refreshTokenLifetime = 604800; // 7 days in seconds private bcryptCostFactor = 10; private auditLogger?: AuditLoggingService; - constructor(db: Database, jwtSecret?: string, auditLogger?: AuditLoggingService) { + constructor(db: DatabaseAdapter, jwtSecret?: string, auditLogger?: AuditLoggingService) { this.db = db; this.auditLogger = auditLogger; @@ -480,7 +480,7 @@ export class AuthenticationService { const expiresAt = new 
Date(decoded.exp * 1000).toISOString(); const revokedAt = new Date().toISOString(); - await this.runQuery( + await this.db.execute( `INSERT INTO revoked_tokens (token, userId, revokedAt, expiresAt) VALUES (?, ?, ?, ?)`, [tokenHash, decoded.userId, revokedAt, expiresAt] @@ -505,20 +505,20 @@ export class AuthenticationService { const markerToken = `user_revoke_all_${userId}`; // First, try to update existing marker - const existing = await this.getQuery<{ token: string }>( + const existing = await this.db.queryOne<{ token: string }>( 'SELECT token FROM revoked_tokens WHERE token = ?', [markerToken] ); if (existing) { // Update existing marker with new revocation time - await this.runQuery( + await this.db.execute( `UPDATE revoked_tokens SET revokedAt = ?, expiresAt = ? WHERE token = ?`, [revokedAt, expiresAt, markerToken] ); } else { // Insert new marker - await this.runQuery( + await this.db.execute( `INSERT INTO revoked_tokens (token, userId, revokedAt, expiresAt) VALUES (?, ?, ?, ?)`, [markerToken, userId, revokedAt, expiresAt] @@ -543,7 +543,7 @@ export class AuthenticationService { const tokenHash = crypto.createHash('sha256').update(token).digest('hex'); // Check if specific token is revoked - const revokedToken = await this.getQuery<{ token: string }>( + const revokedToken = await this.db.queryOne<{ token: string }>( 'SELECT token FROM revoked_tokens WHERE token = ? AND expiresAt > ?', [tokenHash, new Date().toISOString()] ); @@ -553,7 +553,7 @@ export class AuthenticationService { } // Check if all user tokens are revoked - const userRevocation = await this.getQuery<{ revokedAt: string }>( + const userRevocation = await this.db.queryOne<{ revokedAt: string }>( `SELECT revokedAt FROM revoked_tokens WHERE token = ? 
AND expiresAt > ?`, [`user_revoke_all_${decoded.userId}`, new Date().toISOString()] @@ -577,7 +577,7 @@ export class AuthenticationService { * Get user by username (including inactive users) */ private async getUserByUsernameIncludingInactive(username: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM users WHERE username = ?', [username] ); @@ -587,7 +587,7 @@ export class AuthenticationService { * Get user by ID */ private async getUserById(userId: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM users WHERE id = ?', [userId] ); @@ -597,7 +597,7 @@ export class AuthenticationService { * Get user roles */ private async getUserRoles(userId: string): Promise { - const roles = await this.allQuery<{ name: string }>( + const roles = await this.db.query<{ name: string }>( `SELECT DISTINCT r.name FROM roles r WHERE r.id IN ( SELECT roleId FROM user_roles WHERE userId = ? @@ -617,7 +617,7 @@ export class AuthenticationService { */ private async updateLastLogin(userId: string): Promise { const now = new Date().toISOString(); - await this.runQuery( + await this.db.execute( 'UPDATE users SET lastLoginAt = ? 
WHERE id = ?', [now, userId] ); @@ -654,7 +654,7 @@ export class AuthenticationService { private async checkAccountLockout(username: string): Promise<{ isLocked: boolean; reason?: string }> { try { // Check for existing lockout - const lockout = await this.getQuery<{ + const lockout = await this.db.queryOne<{ lockoutType: string; lockedUntil: string | null; failedAttempts: number; @@ -685,7 +685,7 @@ export class AuthenticationService { }; } else { // Temporary lockout expired - remove it - await this.runQuery('DELETE FROM account_lockouts WHERE username = ?', [username]); + await this.db.execute('DELETE FROM account_lockouts WHERE username = ?', [username]); } } } @@ -722,14 +722,14 @@ export class AuthenticationService { const timestamp = now.toISOString(); // Record the failed attempt - await this.runQuery( + await this.db.execute( `INSERT INTO failed_login_attempts (username, attemptedAt, ipAddress, reason) VALUES (?, ?, ?, ?)`, [username, timestamp, ipAddress ?? null, reason] ); // Count total failed attempts (not just within window, for permanent lockout) - const totalAttempts = await this.getQuery<{ count: number }>( + const totalAttempts = await this.db.queryOne<{ count: number }>( `SELECT COUNT(*) as count FROM failed_login_attempts WHERE username = ?`, [username] @@ -745,7 +745,7 @@ export class AuthenticationService { // Count recent failed attempts (within the lockout window) for temporary lockout const windowStart = new Date(now.getTime() - this.TEMP_LOCKOUT_WINDOW_MINUTES * 60000); - const recentAttempts = await this.getQuery<{ count: number }>( + const recentAttempts = await this.db.queryOne<{ count: number }>( `SELECT COUNT(*) as count FROM failed_login_attempts WHERE username = ? 
AND attemptedAt >= ?`, [username, windowStart.toISOString()] @@ -776,14 +776,14 @@ export class AuthenticationService { const lockedUntil = new Date(now.getTime() + this.TEMP_LOCKOUT_DURATION_MINUTES * 60000).toISOString(); // Check if lockout already exists - const existing = await this.getQuery<{ username: string }>( + const existing = await this.db.queryOne<{ username: string }>( 'SELECT username FROM account_lockouts WHERE username = ?', [username] ); if (existing) { // Update existing lockout - await this.runQuery( + await this.db.execute( `UPDATE account_lockouts SET lockoutType = ?, lockedAt = ?, lockedUntil = ?, failedAttempts = ?, lastAttemptAt = ? WHERE username = ?`, @@ -791,7 +791,7 @@ export class AuthenticationService { ); } else { // Insert new lockout - await this.runQuery( + await this.db.execute( `INSERT INTO account_lockouts (username, lockoutType, lockedAt, lockedUntil, failedAttempts, lastAttemptAt) VALUES (?, ?, ?, ?, ?, ?)`, [username, 'temporary', lockedAt, lockedUntil, failedAttempts, lockedAt] @@ -816,14 +816,14 @@ export class AuthenticationService { const lockedAt = now.toISOString(); // Check if lockout already exists - const existing = await this.getQuery<{ username: string }>( + const existing = await this.db.queryOne<{ username: string }>( 'SELECT username FROM account_lockouts WHERE username = ?', [username] ); if (existing) { // Update to permanent lockout - await this.runQuery( + await this.db.execute( `UPDATE account_lockouts SET lockoutType = ?, lockedAt = ?, lockedUntil = NULL, failedAttempts = ?, lastAttemptAt = ? 
WHERE username = ?`, @@ -831,7 +831,7 @@ export class AuthenticationService { ); } else { // Insert new permanent lockout - await this.runQuery( + await this.db.execute( `INSERT INTO account_lockouts (username, lockoutType, lockedAt, lockedUntil, failedAttempts, lastAttemptAt) VALUES (?, ?, ?, NULL, ?, ?)`, [username, 'permanent', lockedAt, failedAttempts, lockedAt] @@ -852,13 +852,13 @@ export class AuthenticationService { private async clearFailedLoginAttempts(username: string): Promise { try { // Remove failed attempts - await this.runQuery( + await this.db.execute( 'DELETE FROM failed_login_attempts WHERE username = ?', [username] ); // Remove any temporary lockouts (permanent lockouts remain) - await this.runQuery( + await this.db.execute( `DELETE FROM account_lockouts WHERE username = ? AND lockoutType = 'temporary'`, [username] ); @@ -876,10 +876,10 @@ export class AuthenticationService { public async unlockAccount(username: string): Promise { try { // Remove all lockouts - await this.runQuery('DELETE FROM account_lockouts WHERE username = ?', [username]); + await this.db.execute('DELETE FROM account_lockouts WHERE username = ?', [username]); // Clear failed attempts - await this.runQuery('DELETE FROM failed_login_attempts WHERE username = ?', [username]); + await this.db.execute('DELETE FROM failed_login_attempts WHERE username = ?', [username]); console.warn(`[ADMIN] Account unlocked: ${username}`); } catch (error) { @@ -900,7 +900,7 @@ export class AuthenticationService { reason: string; }[]> { try { - return await this.allQuery<{ + return await this.db.query<{ attemptedAt: string; ipAddress: string | null; reason: string; @@ -930,7 +930,7 @@ export class AuthenticationService { failedAttempts: number; } | null> { try { - return await this.getQuery<{ + return await this.db.queryOne<{ lockoutType: string; lockedAt: string; lockedUntil: string | null; @@ -964,42 +964,6 @@ export class AuthenticationService { }; } - /** - * Helper: Run a query that doesn't 
return rows - */ - private runQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - } - - /** - * Helper: Get a single row - */ - private getQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.get(sql, params, (err, row) => { - if (err) reject(err); - else resolve(row as T || null); - }); - }); - } - - /** - * Helper: Get all rows - */ - private allQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.all(sql, params, (err, rows) => { - if (err) reject(err); - else resolve(rows as T[]); - }); - }); - } - // Brute force protection constants private readonly TEMP_LOCKOUT_ATTEMPTS = 5; private readonly TEMP_LOCKOUT_WINDOW_MINUTES = 15; diff --git a/backend/src/services/BatchExecutionService.ts b/backend/src/services/BatchExecutionService.ts index fff66f02..d3baff93 100644 --- a/backend/src/services/BatchExecutionService.ts +++ b/backend/src/services/BatchExecutionService.ts @@ -1,4 +1,4 @@ -import type sqlite3 from "sqlite3"; +import type { DatabaseAdapter } from "../database/DatabaseAdapter"; import type { ExecutionQueue } from "./ExecutionQueue"; import type { ExecutionRepository, NodeResult } from "../database/ExecutionRepository"; import type { IntegrationManager } from "../integrations/IntegrationManager"; @@ -184,7 +184,7 @@ export interface BatchStatusResponse { */ export class BatchExecutionService { constructor( - private db: sqlite3.Database, + private db: DatabaseAdapter, private executionQueue: ExecutionQueue, private executionRepository: ExecutionRepository, private integrationManager: IntegrationManager, @@ -304,16 +304,7 @@ export class BatchExecutionService { 0, ]; - await new Promise((resolve, reject) => { - this.db.run(sql, params, (err) => { - if (err) { - logger.error(`Failed to create batch execution 
record: ${err.message}`); - reject(new Error(`Failed to create batch execution: ${err.message}`)); - } else { - resolve(); - } - }); - }); + await this.db.execute(sql, params); logger.info( `Created batch execution ${batchId} with ${String(executionIds.length)} executions`, @@ -332,178 +323,147 @@ export class BatchExecutionService { * Get batch execution status * * Fetches batch details and aggregates status from all individual executions. + * Supports optional status filtering. + * + * **Validates: Requirements 6.2, 6.3, 6.4, 6.8** * * @param batchId - Batch execution ID + * @param statusFilter - Optional status filter for executions * @returns Batch status with aggregated statistics + * @throws Error if batch ID does not exist */ - /** - * Get batch execution status - * - * Fetches batch details and aggregates status from all individual executions. - * Supports optional status filtering. - * - * **Validates: Requirements 6.2, 6.3, 6.4, 6.8** - * - * @param batchId - Batch execution ID - * @param statusFilter - Optional status filter for executions - * @returns Batch status with aggregated statistics - * @throws Error if batch ID does not exist - */ - async getBatchStatus( - batchId: string, - statusFilter?: string - ): Promise { - const logger = new LoggerService(); - - // Step 1: Fetch batch execution record - const batchSql = "SELECT * FROM batch_executions WHERE id = ?"; - const batchRow = await new Promise((resolve, reject) => { - this.db.get(batchSql, [batchId], (err, row) => { - if (err) { - logger.error(`Failed to fetch batch execution: ${err.message}`); - reject(new Error(`Failed to fetch batch execution: ${err.message}`)); - } else { - resolve(row as BatchExecutionRow | undefined); - } - }); - }); + async getBatchStatus( + batchId: string, + statusFilter?: string + ): Promise { + const logger = new LoggerService(); - if (!batchRow) { - throw new Error(`Batch execution ${batchId} not found`); - } + // Step 1: Fetch batch execution record + const batchRow 
= await this.db.queryOne( + "SELECT * FROM batch_executions WHERE id = ?", + [batchId] + ); - // Step 2: Fetch all executions for this batch - let executionsSql = "SELECT * FROM executions WHERE batch_id = ? ORDER BY batch_position ASC"; - const executionsParams: (string | number)[] = [batchId]; + if (!batchRow) { + throw new Error(`Batch execution ${batchId} not found`); + } - // Apply status filter if provided - if (statusFilter) { - executionsSql = "SELECT * FROM executions WHERE batch_id = ? AND status = ? ORDER BY batch_position ASC"; - executionsParams.push(statusFilter); - } + // Step 2: Fetch all executions for this batch + let executionsSql = "SELECT * FROM executions WHERE batch_id = ? ORDER BY batch_position ASC"; + const executionsParams: (string | number)[] = [batchId]; - const executionRows = await new Promise((resolve, reject) => { - this.db.all(executionsSql, executionsParams, (err, rows) => { - if (err) { - logger.error(`Failed to fetch executions for batch: ${err.message}`); - reject(new Error(`Failed to fetch executions: ${err.message}`)); - } else { - resolve(rows as ExecutionRow[]); - } - }); - }); + // Apply status filter if provided + if (statusFilter) { + executionsSql = "SELECT * FROM executions WHERE batch_id = ? AND status = ? ORDER BY batch_position ASC"; + executionsParams.push(statusFilter); + } - // Step 3: Get node names from inventory - const inventory = await this.integrationManager.getAggregatedInventory(); - const nodeMap = new Map(inventory.nodes.map(n => [n.id, n.name])); - - // Step 4: Map execution rows to response format - const executions = executionRows.map(row => { - const nodeId = (JSON.parse(row.target_nodes) as string[])[0]; // Get first node ID - const nodeName = nodeMap.get(nodeId) ?? 
nodeId; - - // Parse results if available - let result: { exitCode?: number; stdout?: string; stderr?: string } | undefined = undefined; - if (row.results) { - try { - const results = JSON.parse(row.results) as NodeResult[]; - if (results.length > 0) { - const nodeResult = results[0]; - result = { - exitCode: nodeResult.output?.exitCode, - stdout: nodeResult.output?.stdout ?? row.stdout ?? undefined, - stderr: nodeResult.output?.stderr ?? row.stderr ?? undefined, - }; - } - } catch { - logger.warn(`Failed to parse results for execution ${row.id}`); + const executionRows = await this.db.query(executionsSql, executionsParams); + + // Step 3: Get node names from inventory + const inventory = await this.integrationManager.getAggregatedInventory(); + const nodeMap = new Map(inventory.nodes.map(n => [n.id, n.name])); + + // Step 4: Map execution rows to response format + const executions = executionRows.map(row => { + const nodeId = (JSON.parse(row.target_nodes) as string[])[0]; // Get first node ID + const nodeName = nodeMap.get(nodeId) ?? nodeId; + + // Parse results if available + let result: { exitCode?: number; stdout?: string; stderr?: string } | undefined = undefined; + if (row.results) { + try { + const results = JSON.parse(row.results) as NodeResult[]; + if (results.length > 0) { + const nodeResult = results[0]; + result = { + exitCode: nodeResult.output?.exitCode, + stdout: nodeResult.output?.stdout ?? row.stdout ?? undefined, + stderr: nodeResult.output?.stderr ?? row.stderr ?? 
undefined, + }; } + } catch { + logger.warn(`Failed to parse results for execution ${row.id}`); } + } - // Calculate duration if completed - let duration: number | undefined; - if (row.started_at && row.completed_at) { - const startTime = new Date(row.started_at).getTime(); - const endTime = new Date(row.completed_at).getTime(); - duration = endTime - startTime; - } + // Calculate duration if completed + let duration: number | undefined; + if (row.started_at && row.completed_at) { + const startTime = new Date(row.started_at).getTime(); + const endTime = new Date(row.completed_at).getTime(); + duration = endTime - startTime; + } - return { - id: row.id, - nodeId, - nodeName, - status: row.status, - startedAt: row.started_at ? new Date(row.started_at) : undefined, - completedAt: row.completed_at ? new Date(row.completed_at) : undefined, - duration, - result, - }; - }); + return { + id: row.id, + nodeId, + nodeName, + status: row.status, + startedAt: row.started_at ? new Date(row.started_at) : undefined, + completedAt: row.completed_at ? 
new Date(row.completed_at) : undefined, + duration, + result, + }; + }); - // Step 5: Aggregate statistics from all executions (not filtered) - const allExecutionsSql = "SELECT status, started_at FROM executions WHERE batch_id = ?"; - const allExecutionRows = await new Promise((resolve, reject) => { - this.db.all(allExecutionsSql, [batchId], (err, rows) => { - if (err) { - logger.error(`Failed to fetch all executions for stats: ${err.message}`); - reject(new Error(`Failed to fetch executions: ${err.message}`)); - } else { - resolve(rows as ExecutionStatusRow[]); - } - }); - }); + // Step 5: Aggregate statistics from all executions (not filtered) + const allExecutionRows = await this.db.query( + "SELECT status, started_at FROM executions WHERE batch_id = ?", + [batchId] + ); - const stats = { - total: allExecutionRows.length, - queued: allExecutionRows.filter(r => r.status === "running" && !r.started_at).length, - running: allExecutionRows.filter(r => r.status === "running").length, - success: allExecutionRows.filter(r => r.status === "success").length, - failed: allExecutionRows.filter(r => r.status === "failed").length, - }; + const stats = { + total: allExecutionRows.length, + queued: allExecutionRows.filter(r => r.status === "running" && !r.started_at).length, + running: allExecutionRows.filter(r => r.status === "running").length, + success: allExecutionRows.filter(r => r.status === "success").length, + failed: allExecutionRows.filter(r => r.status === "failed").length, + }; - // Step 6: Calculate progress percentage - const completedCount = stats.success + stats.failed; - const progress = stats.total > 0 ? 
Math.round((completedCount / stats.total) * 100) : 0; - - // Step 7: Determine batch status - let batchStatus: "running" | "success" | "failed" | "partial" | "cancelled" = "running"; - if (completedCount === stats.total) { - if (stats.success === stats.total) { - batchStatus = "success"; - } else if (stats.failed === stats.total) { - batchStatus = "failed"; - } else { - batchStatus = "partial"; - } + // Step 6: Calculate progress percentage + const completedCount = stats.success + stats.failed; + const progress = stats.total > 0 ? Math.round((completedCount / stats.total) * 100) : 0; + + // Step 7: Determine batch status + let batchStatus: "running" | "success" | "failed" | "partial" | "cancelled" = "running"; + if (completedCount === stats.total) { + if (stats.success === stats.total) { + batchStatus = "success"; + } else if (stats.failed === stats.total) { + batchStatus = "failed"; + } else { + batchStatus = "partial"; } + } - // Step 8: Build batch execution object - const batch: BatchExecution = { - id: batchRow.id, - type: batchRow.type as "command" | "task" | "plan", - action: batchRow.action, - parameters: batchRow.parameters ? JSON.parse(batchRow.parameters) as Record : undefined, - targetNodes: JSON.parse(batchRow.target_nodes) as string[], - targetGroups: JSON.parse(batchRow.target_groups) as string[], - status: batchStatus, - createdAt: new Date(batchRow.created_at), - startedAt: batchRow.started_at ? new Date(batchRow.started_at) : undefined, - completedAt: batchRow.completed_at ? new Date(batchRow.completed_at) : undefined, - userId: batchRow.user_id, - executionIds: JSON.parse(batchRow.execution_ids) as string[], - stats, - }; + // Step 8: Build batch execution object + const batch: BatchExecution = { + id: batchRow.id, + type: batchRow.type as "command" | "task" | "plan", + action: batchRow.action, + parameters: batchRow.parameters ? 
JSON.parse(batchRow.parameters) as Record : undefined, + targetNodes: JSON.parse(batchRow.target_nodes) as string[], + targetGroups: JSON.parse(batchRow.target_groups) as string[], + status: batchStatus, + createdAt: new Date(batchRow.created_at), + startedAt: batchRow.started_at ? new Date(batchRow.started_at) : undefined, + completedAt: batchRow.completed_at ? new Date(batchRow.completed_at) : undefined, + userId: batchRow.user_id, + executionIds: JSON.parse(batchRow.execution_ids) as string[], + stats, + }; - logger.info( - `Fetched batch status for ${batchId}: ${String(stats.success)}/${String(stats.total)} success, ${String(stats.failed)}/${String(stats.total)} failed, ${String(progress)}% complete` - ); + logger.info( + `Fetched batch status for ${batchId}: ${String(stats.success)}/${String(stats.total)} success, ${String(stats.failed)}/${String(stats.total)} failed, ${String(progress)}% complete` + ); - return { - batch, - executions, - progress, - }; - } + return { + batch, + executions, + progress, + }; + } /** * Cancel a batch execution @@ -517,17 +477,10 @@ export class BatchExecutionService { const logger = new LoggerService(); // Step 1: Verify batch exists - const batchSql = "SELECT * FROM batch_executions WHERE id = ?"; - const batchRow = await new Promise((resolve, reject) => { - this.db.get(batchSql, [batchId], (err, row) => { - if (err) { - logger.error(`Failed to fetch batch execution: ${err.message}`); - reject(new Error(`Failed to fetch batch execution: ${err.message}`)); - } else { - resolve(row as BatchExecutionRow | undefined); - } - }); - }); + const batchRow = await this.db.queryOne( + "SELECT * FROM batch_executions WHERE id = ?", + [batchId] + ); if (!batchRow) { throw new Error(`Batch execution ${batchId} not found`); @@ -540,16 +493,8 @@ export class BatchExecutionService { WHERE batch_id = ? 
AND status = 'running' `; - const cancelledCount = await new Promise((resolve, reject) => { - this.db.run(cancelSql, [new Date().toISOString(), batchId], function(err) { - if (err) { - logger.error(`Failed to cancel executions: ${err.message}`); - reject(new Error(`Failed to cancel executions: ${err.message}`)); - } else { - resolve(this.changes); - } - }); - }); + const cancelResult = await this.db.execute(cancelSql, [new Date().toISOString(), batchId]); + const cancelledCount = cancelResult.changes; // Step 3: Update batch status to cancelled const updateBatchSql = ` @@ -558,22 +503,14 @@ export class BatchExecutionService { WHERE id = ? `; - await new Promise((resolve, reject) => { - this.db.run(updateBatchSql, [new Date().toISOString(), batchId], (err) => { - if (err) { - logger.error(`Failed to update batch status: ${err.message}`); - reject(new Error(`Failed to update batch status: ${err.message}`)); - } else { - resolve(); - } - }); - }); + await this.db.execute(updateBatchSql, [new Date().toISOString(), batchId]); logger.info(`Cancelled batch ${batchId}: ${String(cancelledCount)} executions cancelled`); return { cancelledCount }; } + /** * Expand group IDs to node IDs * diff --git a/backend/src/services/GroupService.ts b/backend/src/services/GroupService.ts index 81a1855d..0d2d9dfd 100644 --- a/backend/src/services/GroupService.ts +++ b/backend/src/services/GroupService.ts @@ -1,4 +1,4 @@ -import type { Database } from 'sqlite3'; +import type { DatabaseAdapter } from '../database/DatabaseAdapter'; import { randomUUID } from 'crypto'; /** @@ -97,9 +97,9 @@ export interface Role { * - Validate group data */ export class GroupService { - private db: Database; + private db: DatabaseAdapter; - constructor(db: Database) { + constructor(db: DatabaseAdapter) { this.db = db; } @@ -122,7 +122,7 @@ export class GroupService { const now = new Date().toISOString(); // Insert group - await this.runQuery( + await this.db.execute( `INSERT INTO groups (id, name, 
description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)`, [groupId, data.name, data.description, now, now] @@ -144,7 +144,7 @@ export class GroupService { * @returns Group or null if not found */ public async getGroupById(id: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM groups WHERE id = ?', [id] ); @@ -157,7 +157,7 @@ export class GroupService { * @returns Group or null if not found */ private async getGroupByName(name: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM groups WHERE name = ?', [name] ); @@ -208,7 +208,7 @@ export class GroupService { params.push(id); // Execute update - await this.runQuery( + await this.db.execute( `UPDATE groups SET ${updates.join(', ')} WHERE id = ?`, params ); @@ -236,7 +236,7 @@ export class GroupService { } // Hard delete: CASCADE will remove user_groups and group_roles associations - await this.runQuery( + await this.db.execute( 'DELETE FROM groups WHERE id = ?', [id] ); @@ -265,14 +265,14 @@ export class GroupService { const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : ''; // Get total count - const countResult = await this.getQuery<{ count: number }>( + const countResult = await this.db.queryOne<{ count: number }>( `SELECT COUNT(*) as count FROM groups ${whereClause}`, params ); const total = countResult?.count ?? 0; // Get paginated results - const groups = await this.allQuery( + const groups = await this.db.query( `SELECT * FROM groups ${whereClause} ORDER BY name ASC LIMIT ? OFFSET ?`, [...params, limit, offset] ); @@ -292,7 +292,7 @@ export class GroupService { * @returns Array of users who are members of the group */ public async getGroupMembers(groupId: string): Promise { - return this.allQuery( + return this.db.query( `SELECT u.* FROM users u INNER JOIN user_groups ug ON ug.userId = u.id WHERE ug.groupId = ? 
@@ -308,7 +308,7 @@ export class GroupService { * @returns Number of members in the group */ public async getGroupMemberCount(groupId: string): Promise { - const result = await this.getQuery<{ count: number }>( + const result = await this.db.queryOne<{ count: number }>( `SELECT COUNT(*) as count FROM user_groups WHERE groupId = ?`, [groupId] ); @@ -329,7 +329,7 @@ export class GroupService { } // Check if role exists - const role = await this.getQuery( + const role = await this.db.queryOne( 'SELECT * FROM roles WHERE id = ?', [roleId] ); @@ -338,7 +338,7 @@ export class GroupService { } // Check if assignment already exists - const existing = await this.getQuery<{ groupId: string }>( + const existing = await this.db.queryOne<{ groupId: string }>( 'SELECT groupId FROM group_roles WHERE groupId = ? AND roleId = ?', [groupId, roleId] ); @@ -347,7 +347,7 @@ export class GroupService { } // Create assignment - await this.runQuery( + await this.db.execute( 'INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)', [groupId, roleId, new Date().toISOString()] ); @@ -362,7 +362,7 @@ export class GroupService { */ public async removeRoleFromGroup(groupId: string, roleId: string): Promise { // Check if assignment exists - const existing = await this.getQuery<{ groupId: string }>( + const existing = await this.db.queryOne<{ groupId: string }>( 'SELECT groupId FROM group_roles WHERE groupId = ? AND roleId = ?', [groupId, roleId] ); @@ -371,7 +371,7 @@ export class GroupService { } // Remove assignment - await this.runQuery( + await this.db.execute( 'DELETE FROM group_roles WHERE groupId = ? AND roleId = ?', [groupId, roleId] ); @@ -384,7 +384,7 @@ export class GroupService { * @returns Array of roles assigned to the group */ public async getGroupRoles(groupId: string): Promise { - return this.allQuery( + return this.db.query( `SELECT r.* FROM roles r INNER JOIN group_roles gr ON gr.roleId = r.id WHERE gr.groupId = ? 
@@ -393,39 +393,4 @@ export class GroupService { ); } - /** - * Helper: Run a query that doesn't return rows - */ - private runQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - } - - /** - * Helper: Get a single row - */ - private getQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.get(sql, params, (err: Error | null, row: unknown) => { - if (err) reject(err); - else resolve((row as T) ?? null); - }); - }); - } - - /** - * Helper: Get all rows - */ - private allQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.all(sql, params, (err: Error | null, rows: unknown[]) => { - if (err) reject(err); - else resolve(rows as T[]); - }); - }); - } } diff --git a/backend/src/services/IntegrationColorService.ts b/backend/src/services/IntegrationColorService.ts index cc3f2ed7..d91db2ec 100644 --- a/backend/src/services/IntegrationColorService.ts +++ b/backend/src/services/IntegrationColorService.ts @@ -17,6 +17,8 @@ export interface IntegrationColors { puppetserver: IntegrationColorConfig; hiera: IntegrationColorConfig; ssh: IntegrationColorConfig; + proxmox: IntegrationColorConfig; + aws: IntegrationColorConfig; } /** @@ -40,35 +42,48 @@ export class IntegrationColorService { // Define color palette for each integration // Colors inspired by Puppet logo for better visibility and brand consistency this.colors = { + // Provisioning tools — vivid blues + proxmox: { + primary: '#3B82F6', // Vivid blue + light: '#EFF6FF', + dark: '#2563EB', + }, + aws: { + primary: '#06B6D4', // Vivid cyan + light: '#ECFEFF', + dark: '#0891B2', + }, + // Remote execution tools — vivid greens bolt: { - primary: '#FFAE1A', // Bright orange from Puppet logo - light: '#FFF4E0', - dark: '#CC8B15', + primary: '#22C55E', // Vivid green + light: 
'#F0FDF4', + dark: '#16A34A', }, ansible: { - primary: '#1A4D8F', // Blue for Ansible - light: '#E8F1FF', - dark: '#133A6D', + primary: '#10B981', // Vivid emerald + light: '#ECFDF5', + dark: '#059669', + }, + ssh: { + primary: '#A3E635', // Vivid lime + light: '#F7FEE7', + dark: '#84CC16', }, + // Puppet ecosystem — vivid warm tones puppetdb: { - primary: '#9063CD', // Violet/purple from Puppet logo - light: '#F0E6FF', - dark: '#7249A8', + primary: '#F97316', // Vivid orange + light: '#FFF7ED', + dark: '#EA580C', }, puppetserver: { - primary: '#2E3A87', // Dark blue from Puppet logo - light: '#E8EAFF', - dark: '#1F2760', + primary: '#EF4444', // Vivid red + light: '#FEF2F2', + dark: '#DC2626', }, hiera: { - primary: '#C1272D', // Dark red - light: '#FFE8E9', - dark: '#9A1F24', - }, - ssh: { - primary: '#10B981', // Green for SSH connectivity - light: '#D1FAE5', - dark: '#059669', + primary: '#F59E0B', // Vivid amber + light: '#FFFBEB', + dark: '#D97706', }, }; diff --git a/backend/src/services/IntegrationConfigService.ts b/backend/src/services/IntegrationConfigService.ts new file mode 100644 index 00000000..6c651a92 --- /dev/null +++ b/backend/src/services/IntegrationConfigService.ts @@ -0,0 +1,357 @@ +import { randomUUID, randomBytes, scryptSync, createCipheriv, createDecipheriv } from "crypto"; +import type { DatabaseAdapter } from "../database/DatabaseAdapter"; +import { + CreateIntegrationConfigSchema, + type IntegrationConfigRecord, + type IntegrationConfigRow, +} from "./IntegrationConfigService.types"; + +/** + * Regex patterns for detecting sensitive config field names. + * Matches field names containing token, password, secret, or key (case-insensitive). + * Requirements: 18.4, 20.1 + */ +const SENSITIVE_FIELD_PATTERN = /token|password|secret|key/i; + +/** + * Encrypted field envelope stored as a JSON string in the config column. + * The presence of all four properties distinguishes encrypted values from plaintext. 
+ */ +interface EncryptedEnvelope { + iv: string; + salt: string; + encrypted: string; + tag: string; +} + +/** + * IntegrationConfigService — CRUD operations for per-user integration configs + * with AES-256-GCM encryption of sensitive fields at rest. + * + * Requirements: 18.1, 18.2, 18.3, 18.4, 18.5, 19.1, 19.2, 19.3, 19.4, 20.1, 20.2, 20.3, 20.4 + */ +export class IntegrationConfigService { + private db: DatabaseAdapter; + private secret: string; + private envConfigProvider?: (integrationName: string) => Record; + + constructor( + db: DatabaseAdapter, + secret: string, + envConfigProvider?: (integrationName: string) => Record, + ) { + this.db = db; + this.secret = secret; + this.envConfigProvider = envConfigProvider; + } + + // --------------------------------------------------------------------------- + // Public API + // --------------------------------------------------------------------------- + + /** + * Save (upsert) an integration config for a user. + * Validates against the CreateIntegrationConfig Zod schema, encrypts sensitive + * fields, then inserts or updates the record. + * Requirements: 18.1, 18.2, 18.3, 18.4 + */ + async saveConfig( + userId: string, + integrationName: string, + config: Record, + ): Promise { + // Validate input + CreateIntegrationConfigSchema.parse({ userId, integrationName, config }); + + const now = new Date().toISOString(); + const id = randomUUID(); + const encryptedConfig = this.encryptSensitiveFields(config); + const configJson = JSON.stringify(encryptedConfig); + + await this.db.execute( + `INSERT INTO integration_configs (id, userId, integrationName, config, isActive, createdAt, updatedAt) + VALUES (?, ?, ?, ?, 1, ?, ?) + ON CONFLICT(userId, integrationName) DO UPDATE SET + config = excluded.config, + updatedAt = excluded.updatedAt`, + [id, userId, integrationName, configJson, now, now], + ); + } + + /** + * Retrieve a single config record, decrypting sensitive fields. 
+ * Requirements: 18.1, 18.5 + */ + async getConfig( + userId: string, + integrationName: string, + ): Promise { + const row = await this.db.queryOne( + `SELECT * FROM integration_configs WHERE userId = ? AND integrationName = ?`, + [userId, integrationName], + ); + if (!row) return null; + return this.rowToRecord(row); + } + + /** + * Delete a config record. + * Requirements: 18.1 + */ + async deleteConfig(userId: string, integrationName: string): Promise { + await this.db.execute( + `DELETE FROM integration_configs WHERE userId = ? AND integrationName = ?`, + [userId, integrationName], + ); + } + + /** + * List all configs for a user, decrypting sensitive fields. + * Requirements: 18.1 + */ + async listConfigs(userId: string): Promise { + const rows = await this.db.query( + `SELECT * FROM integration_configs WHERE userId = ?`, + [userId], + ); + return rows.map((r) => this.rowToRecord(r)); + } + + /** + * Retrieve all active configs (decrypted). Used at startup to merge with .env. + * Requirements: 18.1 + */ + async getActiveConfigs(): Promise { + const rows = await this.db.query( + `SELECT * FROM integration_configs WHERE isActive = 1`, + ); + return rows.map((r) => this.rowToRecord(r)); + } + + /** + * Get the effective (merged) config for an integration. + * .env values serve as the base; DB values override for non-null keys (shallow merge). + * Requirements: 19.1, 19.2, 19.3, 19.4 + */ + async getEffectiveConfig( + integrationName: string, + ): Promise> { + const envConfig = this.envConfigProvider + ? this.envConfigProvider(integrationName) + : null; + + // Find the first active DB config for this integration + const row = await this.db.queryOne( + `SELECT * FROM integration_configs WHERE integrationName = ? AND isActive = 1 LIMIT 1`, + [integrationName], + ); + const dbConfig = row ? this.rowToRecord(row).config : null; + + if (!envConfig && !dbConfig) return {}; + if (!dbConfig) return { ...envConfig! 
}; + if (!envConfig) return { ...dbConfig }; + + // Shallow merge: env as base, DB overrides for non-null keys + const result: Record = { ...envConfig }; + for (const [key, value] of Object.entries(dbConfig)) { + if (value !== null && value !== undefined) { + result[key] = value; + } + } + return result; + } + + /** + * Re-encrypt all stored configs atomically: decrypt with oldKey, re-encrypt with newKey. + * Requirements: 20.4 + */ + async rotateEncryptionKey(oldKey: string, newKey: string): Promise { + await this.db.withTransaction(async () => { + const rows = await this.db.query( + `SELECT * FROM integration_configs`, + ); + + for (const row of rows) { + const rawConfig: Record = JSON.parse(row.config); + + // Decrypt sensitive fields with the old key + const decrypted = this.decryptSensitiveFieldsWithKey(rawConfig, oldKey); + + // Re-encrypt sensitive fields with the new key + const reEncrypted = this.encryptSensitiveFieldsWithKey(decrypted, newKey); + + const now = new Date().toISOString(); + await this.db.execute( + `UPDATE integration_configs SET config = ?, updatedAt = ? 
WHERE id = ?`, + [JSON.stringify(reEncrypted), now, row.id], + ); + } + }); + + // Update the service's internal secret to the new key + this.secret = newKey; + } + + // --------------------------------------------------------------------------- + // Row transformation + // --------------------------------------------------------------------------- + + private rowToRecord(row: IntegrationConfigRow): IntegrationConfigRecord { + const rawConfig: Record = JSON.parse(row.config); + const decryptedConfig = this.decryptSensitiveFields(rawConfig); + return { + id: row.id, + userId: row.userId, + integrationName: row.integrationName, + config: decryptedConfig, + isActive: row.isActive === 1, + createdAt: row.createdAt, + updatedAt: row.updatedAt, + }; + } + + // --------------------------------------------------------------------------- + // Encryption helpers + // --------------------------------------------------------------------------- + + /** + * Determine whether a field name is sensitive. + */ + private isSensitiveField(fieldName: string): boolean { + return SENSITIVE_FIELD_PATTERN.test(fieldName); + } + + /** + * Encrypt a single string value using AES-256-GCM. + * Returns a JSON-encoded EncryptedEnvelope string. + * Requirements: 20.1, 20.2 + */ + private encryptValue(value: string, keyOverride?: string): string { + const secret = keyOverride ?? this.secret; + const salt = randomBytes(16); + const iv = randomBytes(12); + const derivedKey = scryptSync(secret, salt, 32); + + const cipher = createCipheriv("aes-256-gcm", derivedKey, iv); + let encrypted = cipher.update(value, "utf8", "hex"); + encrypted += cipher.final("hex"); + const tag = cipher.getAuthTag(); + + const envelope: EncryptedEnvelope = { + iv: iv.toString("hex"), + salt: salt.toString("hex"), + encrypted, + tag: tag.toString("hex"), + }; + return JSON.stringify(envelope); + } + + /** + * Decrypt a value previously encrypted by encryptValue. 
+ * Requirements: 20.3 + */ + private decryptValue(envelopeStr: string, keyOverride?: string): string { + const secret = keyOverride ?? this.secret; + const envelope: EncryptedEnvelope = JSON.parse(envelopeStr); + const salt = Buffer.from(envelope.salt, "hex"); + const iv = Buffer.from(envelope.iv, "hex"); + const tag = Buffer.from(envelope.tag, "hex"); + const derivedKey = scryptSync(secret, salt, 32); + + const decipher = createDecipheriv("aes-256-gcm", derivedKey, iv); + decipher.setAuthTag(tag); + let decrypted = decipher.update(envelope.encrypted, "hex", "utf8"); + decrypted += decipher.final("utf8"); + return decrypted; + } + + /** + * Check whether a value looks like an EncryptedEnvelope JSON string. + */ + private isEncryptedEnvelope(value: unknown): boolean { + if (typeof value !== "string") return false; + try { + const parsed = JSON.parse(value); + return ( + typeof parsed === "object" && + parsed !== null && + "iv" in parsed && + "salt" in parsed && + "encrypted" in parsed && + "tag" in parsed + ); + } catch { + return false; + } + } + + /** + * Iterate over config keys and encrypt values of sensitive fields. + */ + private encryptSensitiveFields( + config: Record, + ): Record { + const result: Record = {}; + for (const [key, value] of Object.entries(config)) { + if (this.isSensitiveField(key) && value != null) { + result[key] = this.encryptValue(String(value)); + } else { + result[key] = value; + } + } + return result; + } + + /** + * Iterate over config keys and decrypt values that are encrypted envelopes. + */ + private decryptSensitiveFields( + config: Record, + ): Record { + const result: Record = {}; + for (const [key, value] of Object.entries(config)) { + if (this.isEncryptedEnvelope(value)) { + result[key] = this.decryptValue(value as string); + } else { + result[key] = value; + } + } + return result; + } + + /** + * Encrypt sensitive fields using a specific key (for key rotation). 
+ */ + private encryptSensitiveFieldsWithKey( + config: Record, + key: string, + ): Record { + const result: Record = {}; + for (const [k, value] of Object.entries(config)) { + if (this.isSensitiveField(k) && value != null) { + result[k] = this.encryptValue(String(value), key); + } else { + result[k] = value; + } + } + return result; + } + + /** + * Decrypt sensitive fields using a specific key (for key rotation). + */ + private decryptSensitiveFieldsWithKey( + config: Record, + key: string, + ): Record { + const result: Record = {}; + for (const [k, value] of Object.entries(config)) { + if (this.isEncryptedEnvelope(value)) { + result[k] = this.decryptValue(value as string, key); + } else { + result[k] = value; + } + } + return result; + } +} diff --git a/backend/src/services/IntegrationConfigService.types.ts b/backend/src/services/IntegrationConfigService.types.ts new file mode 100644 index 00000000..a4cfd8fe --- /dev/null +++ b/backend/src/services/IntegrationConfigService.types.ts @@ -0,0 +1,63 @@ +import { z } from "zod"; + +/** + * Integration Config Service Types and Zod Schemas + * + * Type definitions and validation schemas for the integration configuration + * storage system. Configs are stored per-user with sensitive fields encrypted + * at rest using AES-256-GCM. + * + * Requirements: 32.1, 32.2, 32.3, 32.4 + */ + +// ============================================================================ +// Integration Config Record Schemas +// ============================================================================ + +/** + * Full integration config record as stored in the database and returned by queries. + * The `config` field contains a JSON object; sensitive fields are decrypted before + * being returned to callers. 
+ * Requirements: 32.1 + */ +export const IntegrationConfigRecordSchema = z.object({ + id: z.string().min(1), + userId: z.string().min(1), + integrationName: z.string().min(1), + config: z.record(z.unknown()), + isActive: z.boolean(), + createdAt: z.string(), + updatedAt: z.string(), +}); + +export type IntegrationConfigRecord = z.infer; + +/** + * Schema for creating or updating an integration config. + * The id, createdAt, updatedAt, and isActive fields are managed internally. + * Requirements: 32.1, 32.2 + */ +export const CreateIntegrationConfigSchema = z.object({ + userId: z.string().min(1), + integrationName: z.string().min(1), + config: z.record(z.unknown()), +}); + +export type CreateIntegrationConfig = z.infer; + +/** + * Raw database row shape before transformation. + * The `isActive` column is stored as INTEGER (0/1) in SQLite and needs + * conversion to boolean, and `config` is stored as a JSON string. + */ +export const IntegrationConfigRowSchema = z.object({ + id: z.string(), + userId: z.string(), + integrationName: z.string(), + config: z.string(), + isActive: z.number(), + createdAt: z.string(), + updatedAt: z.string(), +}); + +export type IntegrationConfigRow = z.infer; diff --git a/backend/src/services/PermissionService.ts b/backend/src/services/PermissionService.ts index 1b69da9f..cbed0d8f 100644 --- a/backend/src/services/PermissionService.ts +++ b/backend/src/services/PermissionService.ts @@ -1,4 +1,4 @@ -import type { Database } from 'sqlite3'; +import type { DatabaseAdapter } from '../database/DatabaseAdapter'; import { randomUUID } from 'crypto'; import { performanceMonitor } from './PerformanceMonitor'; @@ -65,11 +65,11 @@ export interface PaginatedResult { * - Cache permission check results for performance */ export class PermissionService { - private db: Database; + private db: DatabaseAdapter; private cache: Map; private readonly CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes in milliseconds - constructor(db: Database) { + constructor(db: 
DatabaseAdapter) { this.db = db; this.cache = new Map(); } @@ -116,7 +116,7 @@ export class PermissionService { const now = new Date().toISOString(); // Insert permission - await this.runQuery( + await this.db.execute( `INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES (?, ?, ?, ?, ?)`, [permissionId, data.resource, data.action, data.description || '', now] @@ -138,7 +138,7 @@ export class PermissionService { * @returns Permission or null if not found */ public async getPermissionById(id: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM permissions WHERE id = ?', [id] ); @@ -152,7 +152,7 @@ export class PermissionService { * @returns Permission or null if not found */ private async getPermissionByResourceAction(resource: string, action: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM permissions WHERE resource = ? AND "action" = ?', [resource, action] ); @@ -191,14 +191,14 @@ export class PermissionService { const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : ''; // Get total count - const countResult = await this.getQuery<{ count: number }>( + const countResult = await this.db.queryOne<{ count: number }>( `SELECT COUNT(*) as count FROM permissions ${whereClause}`, params ); const total = countResult?.count ?? 0; // Get paginated results - const permissions = await this.allQuery( + const permissions = await this.db.query( `SELECT * FROM permissions ${whereClause} ORDER BY resource ASC, "action" ASC LIMIT ? 
OFFSET ?`, [...params, String(limit), String(offset)] ); @@ -258,7 +258,7 @@ export class PermissionService { } // Step 2: Check if user exists and get admin/active status (Requirement 5.5, 5.6) - const user = await this.getQuery<{ isAdmin: number; isActive: number }>( + const user = await this.db.queryOne<{ isAdmin: number; isActive: number }>( 'SELECT isAdmin, isActive FROM users WHERE id = ?', [userId] ); @@ -309,7 +309,7 @@ export class PermissionService { ) `; - const result = await this.getQuery<{ count: number }>(hasPermissionQuery, [ + const result = await this.db.queryOne<{ count: number }>(hasPermissionQuery, [ resource, action, userId, @@ -361,6 +361,47 @@ export class PermissionService { } } + /** + * Invalidate cached permission checks for ALL users who have a given role. + * + * This covers users assigned the role directly (user_roles) and users + * who inherit the role through a group (user_groups → group_roles). + * + * Call this when permissions are added to or removed from a role so that + * subsequent hasPermission checks reflect the change. + * + * @param roleId - Role ID whose affected users' caches should be invalidated + * + * Requirement: 30.2 + */ + public async invalidateRolePermissionCache(roleId: string): Promise { + // Find all users affected by this role (direct + group-based) + const affectedUsers = await this.db.query<{ userId: string }>( + `SELECT userId FROM user_roles WHERE roleId = ? + UNION + SELECT ug.userId FROM user_groups ug + INNER JOIN group_roles gr ON gr.groupId = ug.groupId + WHERE gr.roleId = ?`, + [roleId, roleId] + ); + + for (const user of affectedUsers) { + this.invalidateUserPermissionCache(user.userId); + } + } + + /** + * Clear the entire permission cache. + * + * Useful for bulk operations like migrations where many permissions + * may change at once. 
+ * + * Requirement: 30.2 + */ + public invalidateAllPermissionCache(): void { + this.cache.clear(); + } + /** * Get all permissions for a user * @@ -383,7 +424,7 @@ export class PermissionService { */ public async getUserPermissions(userId: string): Promise { // Step 1: Check if user exists and get admin/active status - const user = await this.getQuery<{ isAdmin: number; isActive: number }>( + const user = await this.db.queryOne<{ isAdmin: number; isActive: number }>( 'SELECT isAdmin, isActive FROM users WHERE id = ?', [userId] ); @@ -395,7 +436,7 @@ export class PermissionService { // Admin users get all permissions (Requirement 8.3) if (user.isAdmin === 1) { - return this.allQuery( + return this.db.query( 'SELECT * FROM permissions ORDER BY resource ASC, "action" ASC' ); } @@ -419,7 +460,7 @@ export class PermissionService { ORDER BY p.resource ASC, p.action ASC `; - return this.allQuery(permissionsQuery, [userId, userId]); + return this.db.query(permissionsQuery, [userId, userId]); } /** @@ -466,7 +507,7 @@ export class PermissionService { } // Step 2: Check user status once for all uncached checks - const user = await this.getQuery<{ isAdmin: number; isActive: number }>( + const user = await this.db.queryOne<{ isAdmin: number; isActive: number }>( 'SELECT isAdmin, isActive FROM users WHERE id = ?', [userId] ); @@ -522,7 +563,7 @@ export class PermissionService { ) `; - const allowedPermissions = await this.allQuery<{ resource: string; action: string }>( + const allowedPermissions = await this.db.query<{ resource: string; action: string }>( batchQuery, [...params, userId, userId] ); @@ -550,39 +591,4 @@ export class PermissionService { - /** - * Helper: Run a query that doesn't return rows - */ - private runQuery(sql: string, params: (string | number | boolean | null)[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - } - - /** - * Helper: Get a single row 
- */ - private getQuery(sql: string, params: (string | number | boolean | null)[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.get(sql, params, (err, row) => { - if (err) reject(err); - else resolve((row as T) ?? null); - }); - }); - } - - /** - * Helper: Get all rows - */ - private allQuery(sql: string, params: (string | number | boolean | null)[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.all(sql, params, (err, rows) => { - if (err) reject(err); - else resolve(rows as T[]); - }); - }); - } } diff --git a/backend/src/services/RoleService.ts b/backend/src/services/RoleService.ts index 80d4d23c..08ae4245 100644 --- a/backend/src/services/RoleService.ts +++ b/backend/src/services/RoleService.ts @@ -1,4 +1,4 @@ -import type { Database } from 'sqlite3'; +import type { DatabaseAdapter } from '../database/DatabaseAdapter'; import { randomUUID } from 'crypto'; /** @@ -81,9 +81,9 @@ export interface Permission { * - Validate role data */ export class RoleService { - private db: Database; + private db: DatabaseAdapter; - constructor(db: Database) { + constructor(db: DatabaseAdapter) { this.db = db; } @@ -116,7 +116,7 @@ export class RoleService { const now = new Date().toISOString(); // Insert role (isBuiltIn defaults to 0 for custom roles) - await this.runQuery( + await this.db.execute( `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, 0, ?, ?)`, [roleId, data.name, data.description || '', now, now] @@ -138,7 +138,7 @@ export class RoleService { * @returns Role or null if not found */ public async getRoleById(id: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM roles WHERE id = ?', [id] ); @@ -151,7 +151,7 @@ export class RoleService { * @returns Role or null if not found */ private async getRoleByName(name: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM roles WHERE name = ?', [name] ); @@ -217,7 +217,7 
@@ export class RoleService { params.push(id); // Execute update - await this.runQuery( + await this.db.execute( `UPDATE roles SET ${updates.join(', ')} WHERE id = ?`, params ); @@ -250,7 +250,7 @@ export class RoleService { } // Hard delete: CASCADE will remove user_roles, group_roles, and role_permissions associations - await this.runQuery( + await this.db.execute( 'DELETE FROM roles WHERE id = ?', [id] ); @@ -279,14 +279,14 @@ export class RoleService { const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : ''; // Get total count - const countResult = await this.getQuery<{ count: number }>( + const countResult = await this.db.queryOne<{ count: number }>( `SELECT COUNT(*) as count FROM roles ${whereClause}`, params ); const total = countResult?.count ?? 0; // Get paginated results - const roles = await this.allQuery( + const roles = await this.db.query( `SELECT * FROM roles ${whereClause} ORDER BY name ASC LIMIT ? OFFSET ?`, [...params, limit, offset] ); @@ -305,7 +305,7 @@ export class RoleService { * @returns Array of built-in roles (Viewer, Operator, Administrator) */ public async getBuiltInRoles(): Promise { - return this.allQuery( + return this.db.query( 'SELECT * FROM roles WHERE isBuiltIn = 1 ORDER BY name', [] ); @@ -337,7 +337,7 @@ export class RoleService { } // Check if permission exists - const permission = await this.getQuery( + const permission = await this.db.queryOne( 'SELECT * FROM permissions WHERE id = ?', [permissionId] ); @@ -346,7 +346,7 @@ export class RoleService { } // Check if assignment already exists - const existing = await this.getQuery<{ roleId: string }>( + const existing = await this.db.queryOne<{ roleId: string }>( 'SELECT roleId FROM role_permissions WHERE roleId = ? 
AND permissionId = ?', [roleId, permissionId] ); @@ -355,7 +355,7 @@ export class RoleService { } // Create assignment - await this.runQuery( + await this.db.execute( 'INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)', [roleId, permissionId, new Date().toISOString()] ); @@ -370,7 +370,7 @@ export class RoleService { */ public async removePermissionFromRole(roleId: string, permissionId: string): Promise { // Check if assignment exists - const existing = await this.getQuery<{ roleId: string }>( + const existing = await this.db.queryOne<{ roleId: string }>( 'SELECT roleId FROM role_permissions WHERE roleId = ? AND permissionId = ?', [roleId, permissionId] ); @@ -379,7 +379,7 @@ export class RoleService { } // Remove assignment - await this.runQuery( + await this.db.execute( 'DELETE FROM role_permissions WHERE roleId = ? AND permissionId = ?', [roleId, permissionId] ); @@ -392,7 +392,7 @@ export class RoleService { * @returns Array of permissions assigned to the role */ public async getRolePermissions(roleId: string): Promise { - return this.allQuery( + return this.db.query( `SELECT p.* FROM permissions p INNER JOIN role_permissions rp ON rp.permissionId = p.id WHERE rp.roleId = ? @@ -401,39 +401,4 @@ export class RoleService { ); } - /** - * Helper: Run a query that doesn't return rows - */ - private runQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - } - - /** - * Helper: Get a single row - */ - private getQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.get(sql, params, (err: Error | null, row: unknown) => { - if (err) reject(err); - else resolve((row as T) ?? 
null); - }); - }); - } - - /** - * Helper: Get all rows - */ - private allQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.all(sql, params, (err: Error | null, rows: unknown[]) => { - if (err) reject(err); - else resolve(rows as T[]); - }); - }); - } } diff --git a/backend/src/services/SetupService.ts b/backend/src/services/SetupService.ts index 331e6a27..fba82248 100644 --- a/backend/src/services/SetupService.ts +++ b/backend/src/services/SetupService.ts @@ -1,4 +1,4 @@ -import type { Database } from "sqlite3"; +import type { DatabaseAdapter } from "../database/DatabaseAdapter"; import { LoggerService } from "./LoggerService"; const logger = new LoggerService(); @@ -24,9 +24,9 @@ export interface SetupStatus { * Service for managing initial setup and configuration */ export class SetupService { - private db: Database; + private db: DatabaseAdapter; - constructor(db: Database) { + constructor(db: DatabaseAdapter) { this.db = db; } @@ -35,22 +35,18 @@ export class SetupService { * Setup is complete if at least one admin user exists */ public async isSetupComplete(): Promise { - return new Promise((resolve, reject) => { - this.db.get( - "SELECT COUNT(*) as count FROM users WHERE isAdmin = 1", - (err, row: { count: number } | undefined) => { - if (err) { - logger.error("Failed to check setup status", { - component: "SetupService", - operation: "isSetupComplete", - }, err); - reject(err); - } else { - resolve((row?.count ?? 0) > 0); - } - } + try { + const row = await this.db.queryOne<{ count: number }>( + "SELECT COUNT(*) as count FROM users WHERE isAdmin = 1" ); - }); + return (row?.count ?? 0) > 0; + } catch (err) { + logger.error("Failed to check setup status", { + component: "SetupService", + operation: "isSetupComplete", + }, err instanceof Error ? 
err : new Error(String(err))); + throw err; + } } /** @@ -98,50 +94,41 @@ export class SetupService { * Get a configuration value from the database */ private async getConfigValue(key: string, defaultValue: string): Promise { - return new Promise((resolve, reject) => { - this.db.get( + try { + const row = await this.db.queryOne<{ value: string }>( "SELECT value FROM config WHERE key = ?", - [key], - (err, row: { value: string } | undefined) => { - if (err) { - logger.error("Failed to get config value", { - component: "SetupService", - operation: "getConfigValue", - metadata: { key }, - }, err); - reject(err); - } else { - resolve(row?.value ?? defaultValue); - } - } + [key] ); - }); + return row?.value ?? defaultValue; + } catch (err) { + logger.error("Failed to get config value", { + component: "SetupService", + operation: "getConfigValue", + metadata: { key }, + }, err instanceof Error ? err : new Error(String(err))); + throw err; + } } /** * Set a configuration value in the database */ private async setConfigValue(key: string, value: string): Promise { - return new Promise((resolve, reject) => { - this.db.run( + try { + await this.db.execute( `INSERT INTO config (key, value, updatedAt) VALUES (?, ?, datetime('now')) ON CONFLICT(key) DO UPDATE SET value = ?, updatedAt = datetime('now')`, - [key, value, value], - (err) => { - if (err) { - logger.error("Failed to set config value", { - component: "SetupService", - operation: "setConfigValue", - metadata: { key }, - }, err); - reject(err); - } else { - resolve(); - } - } + [key, value, value] ); - }); + } catch (err) { + logger.error("Failed to set config value", { + component: "SetupService", + operation: "setConfigValue", + metadata: { key }, + }, err instanceof Error ? 
err : new Error(String(err))); + throw err; + } } /** diff --git a/backend/src/services/UserService.ts b/backend/src/services/UserService.ts index f52d4dfb..d6b09b17 100644 --- a/backend/src/services/UserService.ts +++ b/backend/src/services/UserService.ts @@ -1,4 +1,4 @@ -import type { Database } from 'sqlite3'; +import type { DatabaseAdapter } from '../database/DatabaseAdapter'; import { randomUUID } from 'crypto'; import type { AuthenticationService } from './AuthenticationService'; import { SetupService } from './SetupService'; @@ -117,11 +117,11 @@ export interface Role { * - Validate user data */ export class UserService { - private db: Database; + private db: DatabaseAdapter; private authService: AuthenticationService; private setupService: SetupService; - constructor(db: Database, authService: AuthenticationService) { + constructor(db: DatabaseAdapter, authService: AuthenticationService) { this.db = db; this.authService = authService; this.setupService = new SetupService(db); @@ -161,7 +161,7 @@ export class UserService { const now = new Date().toISOString(); // Insert user - await this.runQuery( + await this.db.execute( `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, [ @@ -183,7 +183,7 @@ export class UserService { if (!data.isAdmin) { const defaultRoleId = await this.setupService.getDefaultNewUserRole(); if (defaultRoleId) { - await this.runQuery( + await this.db.execute( `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, [userId, defaultRoleId, now] @@ -207,7 +207,7 @@ export class UserService { * @returns User or null if not found */ public async getUserById(id: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM users WHERE id = ?', [id] ); @@ -220,7 +220,7 @@ export class UserService { * @returns User or null if not found */ public async getUserByUsername(username: string): Promise { - 
return this.getQuery( + return this.db.queryOne( 'SELECT * FROM users WHERE username = ?', [username] ); @@ -233,7 +233,7 @@ export class UserService { * @returns User or null if not found */ private async getUserByEmail(email: string): Promise { - return this.getQuery( + return this.db.queryOne( 'SELECT * FROM users WHERE email = ?', [email] ); @@ -311,7 +311,7 @@ export class UserService { params.push(id); // Execute update - await this.runQuery( + await this.db.execute( `UPDATE users SET ${updates.join(', ')} WHERE id = ?`, params ); @@ -339,7 +339,7 @@ export class UserService { } // Soft delete: set isActive to 0 - await this.runQuery( + await this.db.execute( 'UPDATE users SET isActive = 0, updatedAt = ? WHERE id = ?', [new Date().toISOString(), id] ); @@ -378,14 +378,14 @@ export class UserService { const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : ''; // Get total count - const countResult = await this.getQuery<{ count: number }>( + const countResult = await this.db.queryOne<{ count: number }>( `SELECT COUNT(*) as count FROM users ${whereClause}`, params ); const total = countResult?.count ?? 0; // Get paginated results - const users = await this.allQuery( + const users = await this.db.query( `SELECT * FROM users ${whereClause} ORDER BY createdAt DESC LIMIT ? OFFSET ?`, [...params, limit, offset] ); @@ -413,7 +413,7 @@ export class UserService { } // Check if group exists - const group = await this.getQuery( + const group = await this.db.queryOne( 'SELECT * FROM groups WHERE id = ?', [groupId] ); @@ -422,7 +422,7 @@ export class UserService { } // Check if association already exists - const existing = await this.getQuery<{ userId: string }>( + const existing = await this.db.queryOne<{ userId: string }>( 'SELECT userId FROM user_groups WHERE userId = ? 
AND groupId = ?', [userId, groupId] ); @@ -431,7 +431,7 @@ export class UserService { } // Create association - await this.runQuery( + await this.db.execute( 'INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)', [userId, groupId, new Date().toISOString()] ); @@ -446,7 +446,7 @@ export class UserService { */ public async removeUserFromGroup(userId: string, groupId: string): Promise { // Check if association exists - const existing = await this.getQuery<{ userId: string }>( + const existing = await this.db.queryOne<{ userId: string }>( 'SELECT userId FROM user_groups WHERE userId = ? AND groupId = ?', [userId, groupId] ); @@ -455,7 +455,7 @@ export class UserService { } // Remove association - await this.runQuery( + await this.db.execute( 'DELETE FROM user_groups WHERE userId = ? AND groupId = ?', [userId, groupId] ); @@ -468,7 +468,7 @@ export class UserService { * @returns Array of groups */ public async getUserGroups(userId: string): Promise { - return this.allQuery( + return this.db.query( `SELECT g.* FROM groups g INNER JOIN user_groups ug ON ug.groupId = g.id WHERE ug.userId = ? @@ -492,7 +492,7 @@ export class UserService { } // Check if role exists - const role = await this.getQuery( + const role = await this.db.queryOne( 'SELECT * FROM roles WHERE id = ?', [roleId] ); @@ -501,7 +501,7 @@ export class UserService { } // Check if assignment already exists - const existing = await this.getQuery<{ userId: string }>( + const existing = await this.db.queryOne<{ userId: string }>( 'SELECT userId FROM user_roles WHERE userId = ? 
AND roleId = ?', [userId, roleId] ); @@ -510,7 +510,7 @@ export class UserService { } // Create assignment - await this.runQuery( + await this.db.execute( 'INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)', [userId, roleId, new Date().toISOString()] ); @@ -525,7 +525,7 @@ export class UserService { */ public async removeRoleFromUser(userId: string, roleId: string): Promise { // Check if assignment exists - const existing = await this.getQuery<{ userId: string }>( + const existing = await this.db.queryOne<{ userId: string }>( 'SELECT userId FROM user_roles WHERE userId = ? AND roleId = ?', [userId, roleId] ); @@ -534,7 +534,7 @@ export class UserService { } // Remove assignment - await this.runQuery( + await this.db.execute( 'DELETE FROM user_roles WHERE userId = ? AND roleId = ?', [userId, roleId] ); @@ -547,7 +547,7 @@ export class UserService { * @returns Array of roles */ public async getUserRoles(userId: string): Promise { - return this.allQuery( + return this.db.query( `SELECT r.* FROM roles r INNER JOIN user_roles ur ON ur.roleId = r.id WHERE ur.userId = ? 
@@ -594,39 +594,4 @@ export class UserService { }; } - /** - * Helper: Run a query that doesn't return rows - */ - private runQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - } - - /** - * Helper: Get a single row - */ - private getQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.get(sql, params, (err, row) => { - if (err) reject(err); - else resolve(row as T || null); - }); - }); - } - - /** - * Helper: Get all rows - */ - private allQuery(sql: string, params: unknown[] = []): Promise { - return new Promise((resolve, reject) => { - this.db.all(sql, params, (err, rows) => { - if (err) reject(err); - else resolve(rows as T[]); - }); - }); - } } diff --git a/backend/src/services/journal/JournalService.ts b/backend/src/services/journal/JournalService.ts new file mode 100644 index 00000000..441c0a16 --- /dev/null +++ b/backend/src/services/journal/JournalService.ts @@ -0,0 +1,250 @@ +import { randomUUID } from "crypto"; +import type { DatabaseAdapter } from "../../database/DatabaseAdapter"; +import { + CreateJournalEntrySchema, + TimelineOptionsSchema, + SearchOptionsSchema, + type CreateJournalEntry, + type JournalEntry, + type TimelineOptions, + type SearchOptions, +} from "./types"; + +/** + * Minimal interface for live sources that provide node event data. + * Compatible with InformationSourcePlugin without requiring the full import. + */ +export interface LiveSource { + getNodeData(nodeId: string, dataType: string): Promise; + isInitialized(): boolean; +} + +/** + * JournalService — Records and retrieves a unified timeline of events + * for inventory nodes. Supports provisioning events, lifecycle actions, + * execution results, and manual notes. 
+ * + * Requirements: 22.1, 22.2, 22.3, 22.4, 23.1, 23.2, 23.3, 23.4, 23.5, 24.1, 24.2, 24.3 + */ +export class JournalService { + private db: DatabaseAdapter; + private liveSources: Map; + + constructor(db: DatabaseAdapter, liveSources?: Map) { + this.db = db; + this.liveSources = liveSources ?? new Map(); + } + + /** + * Record a journal event. Validates the entry with CreateJournalEntrySchema, + * generates id/timestamp/isLive, and inserts into journal_entries. + * + * Requirements: 22.1, 22.2, 22.3, 22.4 + */ + async recordEvent(entry: CreateJournalEntry): Promise { + const validated = CreateJournalEntrySchema.parse(entry); + + const id = randomUUID(); + const timestamp = new Date().toISOString(); + const detailsJson = JSON.stringify(validated.details ?? {}); + + const sql = ` + INSERT INTO journal_entries ( + id, nodeId, nodeUri, eventType, source, + "action", summary, details, userId, timestamp + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `; + + const params = [ + id, + validated.nodeId, + validated.nodeUri, + validated.eventType, + validated.source, + validated.action, + validated.summary, + detailsJson, + validated.userId ?? null, + timestamp, + ]; + + await this.db.execute(sql, params); + return id; + } + + /** + * Add a manual note to a node's journal. + * Creates an entry with eventType "note" and source "user". + * + * Requirements: 24.1, 24.2 + */ + async addNote( + nodeId: string, + userId: string, + content: string + ): Promise { + return this.recordEvent({ + nodeId, + nodeUri: `user:${nodeId}`, + eventType: "note", + source: "user", + action: "add_note", + summary: content, + details: {}, + userId, + }); + } + + /** + * Get the timeline of journal entries for a specific node, + * sorted by timestamp descending with pagination. + * + * Requirements: 22.4 + */ + async getNodeTimeline( + nodeId: string, + options?: Partial + ): Promise { + const opts = TimelineOptionsSchema.parse(options ?? 
{}); + + let sql = `SELECT * FROM journal_entries WHERE nodeId = ?`; + const params: unknown[] = [nodeId]; + + if (opts.eventType) { + sql += ` AND eventType = ?`; + params.push(opts.eventType); + } + + if (opts.source) { + sql += ` AND source = ?`; + params.push(opts.source); + } + + if (opts.startDate) { + sql += ` AND timestamp >= ?`; + params.push(opts.startDate); + } + + if (opts.endDate) { + sql += ` AND timestamp <= ?`; + params.push(opts.endDate); + } + + sql += ` ORDER BY timestamp DESC LIMIT ? OFFSET ?`; + params.push(opts.limit, opts.offset); + + const rows = await this.db.query(sql, params); + + return rows.map((row) => ({ + ...row, + details: typeof row.details === "string" ? JSON.parse(row.details) : row.details ?? {}, + isLive: false, + })); + } + + /** + * Search journal entries across summary and details fields using LIKE. + * + * Requirements: 24.3 + */ + async searchEntries( + query: string, + options?: Partial + ): Promise { + const opts = SearchOptionsSchema.parse(options ?? {}); + const pattern = `%${query}%`; + + const sql = ` + SELECT * FROM journal_entries + WHERE summary LIKE ? OR details LIKE ? + ORDER BY timestamp DESC + LIMIT ? OFFSET ? + `; + + const params = [pattern, pattern, opts.limit, opts.offset]; + + const rows = await this.db.query(sql, params); + + return rows.map((row) => ({ + ...row, + details: typeof row.details === "string" ? JSON.parse(row.details) : row.details ?? {}, + isLive: false, + })); + } + + /** + * Aggregate a unified timeline merging DB-stored events with live-source events. + * Fetches in parallel, marks isLive flags, sorts by timestamp descending, + * and applies limit/offset pagination. Failed live sources are gracefully skipped. + * + * Requirements: 23.1, 23.2, 23.3, 23.4, 23.5 + */ + async aggregateTimeline( + nodeId: string, + options?: Partial + ): Promise { + const opts = TimelineOptionsSchema.parse(options ?? 
{}); + + // Step 1 & 2: Fetch DB events and live events in parallel + const [dbEntries, liveEntries] = await Promise.all([ + this.getNodeTimeline(nodeId, { ...opts, limit: 200, offset: 0 }), + this.fetchLiveEntries(nodeId), + ]); + + // Step 3: Merge — DB entries already have isLive=false, live entries have isLive=true + const allEntries = [...dbEntries, ...liveEntries]; + + // Step 4: Sort by timestamp descending + allEntries.sort((a, b) => b.timestamp.localeCompare(a.timestamp)); + + // Step 5: Apply pagination + return allEntries.slice(opts.offset, opts.offset + opts.limit); + } + + /** + * Fetch events from all live sources in parallel, gracefully skipping failures. + */ + private async fetchLiveEntries(nodeId: string): Promise { + if (this.liveSources.size === 0) return []; + + const livePromises = Array.from(this.liveSources.entries()).map( + async ([sourceName, source]): Promise => { + try { + if (!source.isInitialized()) return []; + const events = await source.getNodeData(nodeId, "events"); + if (!Array.isArray(events)) return []; + return events.map((e) => this.transformToJournalEntry(e, sourceName)); + } catch { + // Graceful degradation: skip failed sources (Req 23.4) + return []; + } + } + ); + + const results = await Promise.all(livePromises); + return results.flat(); + } + + /** + * Transform a raw live-source event into a JournalEntry with isLive=true. + */ + private transformToJournalEntry( + event: unknown, + sourceName: string + ): JournalEntry { + const e = (event ?? {}) as Record; + return { + id: (typeof e.id === "string" ? e.id : null) ?? randomUUID(), + nodeId: typeof e.nodeId === "string" ? e.nodeId : "", + nodeUri: typeof e.nodeUri === "string" ? e.nodeUri : `${sourceName}:unknown`, + eventType: typeof e.eventType === "string" ? (e.eventType as JournalEntry["eventType"]) : "info", + source: sourceName as JournalEntry["source"], + action: typeof e.action === "string" ? e.action : "unknown", + summary: typeof e.summary === "string" ? 
e.summary : "Live event", + details: typeof e.details === "object" && e.details !== null ? (e.details as Record) : {}, + userId: typeof e.userId === "string" ? e.userId : undefined, + timestamp: typeof e.timestamp === "string" ? e.timestamp : new Date().toISOString(), + isLive: true, + }; + } +} diff --git a/backend/src/services/journal/types.ts b/backend/src/services/journal/types.ts new file mode 100644 index 00000000..f851a493 --- /dev/null +++ b/backend/src/services/journal/types.ts @@ -0,0 +1,128 @@ +import { z } from "zod"; + +/** + * Journal Service Types and Zod Schemas + * + * Type definitions and validation schemas for the nodes journal system. + * The journal records provisioning events, lifecycle actions, execution results, + * and manual notes for inventory nodes. + * + * Requirements: 25.1, 25.2, 25.3, 26.1, 26.2, 26.3, 26.4 + */ + +// ============================================================================ +// Enums +// ============================================================================ + +/** + * Valid journal event types. + * Requirements: 26.3 + */ +export const JournalEventTypeSchema = z.enum([ + "provision", + "destroy", + "start", + "stop", + "reboot", + "suspend", + "resume", + "command_execution", + "task_execution", + "puppet_run", + "package_install", + "config_change", + "note", + "error", + "warning", + "info", +]); + +export type JournalEventType = z.infer; + +/** + * Valid journal source identifiers. Uses integration-level names + * consistent with the single-plugin architecture. 
+ * Requirements: 25.1, 25.2, 25.3 + */ +export const JournalSourceSchema = z.enum([ + "proxmox", + "aws", + "bolt", + "ansible", + "ssh", + "puppetdb", + "user", + "system", +]); + +export type JournalSource = z.infer; + +// ============================================================================ +// Journal Entry Schemas +// ============================================================================ + +/** + * Full journal entry as stored in the database and returned by queries. + * Requirements: 26.1 + */ +export const JournalEntrySchema = z.object({ + id: z.string().min(1), + nodeId: z.string().min(1), + nodeUri: z.string().min(1), + eventType: JournalEventTypeSchema, + source: JournalSourceSchema, + action: z.string().min(1), + summary: z.string().min(1), + details: z.record(z.unknown()).default({}), + userId: z.string().nullable().optional(), + timestamp: z.string().datetime({ message: "Timestamp must be ISO 8601 format" }), + isLive: z.boolean(), +}); + +export type JournalEntry = z.infer; + +/** + * Schema for creating a new journal entry. The id, timestamp, and isLive + * fields are auto-generated and not provided by the caller. + * Requirements: 22.1, 22.2, 22.3, 22.4 + */ +export const CreateJournalEntrySchema = z.object({ + nodeId: z.string().min(1), + nodeUri: z.string().min(1), + eventType: JournalEventTypeSchema, + source: JournalSourceSchema, + action: z.string().min(1), + summary: z.string().min(1), + details: z.record(z.unknown()).optional().default({}), + userId: z.string().nullable().optional(), +}); + +export type CreateJournalEntry = z.infer; + +// ============================================================================ +// Query Option Schemas +// ============================================================================ + +/** + * Options for timeline and aggregation queries. 
+ */ +export const TimelineOptionsSchema = z.object({ + limit: z.number().int().min(1).max(200).default(50), + offset: z.number().int().min(0).default(0), + startDate: z.string().datetime().optional(), + endDate: z.string().datetime().optional(), + eventType: JournalEventTypeSchema.optional(), + source: JournalSourceSchema.optional(), +}); + +export type TimelineOptions = z.infer; + +/** + * Options for searching journal entries. + */ +export const SearchOptionsSchema = z.object({ + limit: z.number().int().min(1).max(200).default(50), + offset: z.number().int().min(0).default(0), +}); + +export type SearchOptions = z.infer; diff --git a/backend/test/AuthenticationService.test.ts b/backend/test/AuthenticationService.test.ts index a15fcb31..f0a07eb3 100644 --- a/backend/test/AuthenticationService.test.ts +++ b/backend/test/AuthenticationService.test.ts @@ -1,16 +1,18 @@ import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../src/database/DatabaseAdapter'; import { AuthenticationService } from '../src/services/AuthenticationService'; import { randomUUID } from 'crypto'; describe('AuthenticationService', () => { - let db: Database; + let db: DatabaseAdapter; let authService: AuthenticationService; const testJwtSecret = 'test-secret-key-for-testing-only'; // pragma: allowlist secret beforeEach(async () => { - // Create in-memory database - db = new Database(':memory:'); + // Create in-memory database via SQLiteAdapter + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -20,12 +22,7 @@ describe('AuthenticationService', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); describe('Password Hashing', () => { @@ -71,7 +68,7 @@ 
describe('AuthenticationService', () => { const passwordHash = await authService.hashPassword('TestPass123!'); const now = new Date().toISOString(); - await runQuery(db, ` + await db.execute(` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) `, [userId, 'testuser', 'test@example.com', passwordHash, 'Test', 'User', 1, 0, now, now]); @@ -106,7 +103,7 @@ describe('AuthenticationService', () => { it('should reject authentication for inactive user', async () => { // Deactivate user - await runQuery(db, 'UPDATE users SET isActive = 0 WHERE username = ?', ['testuser']); + await db.execute('UPDATE users SET isActive = 0 WHERE username = ?', ['testuser']); const result = await authService.authenticate('testuser', 'TestPass123!'); @@ -117,7 +114,7 @@ describe('AuthenticationService', () => { it('should update lastLoginAt timestamp on successful authentication', async () => { await authService.authenticate('testuser', 'TestPass123!'); - const user = await getQuery<{ lastLoginAt: string }>(db, + const user = await db.queryOne<{ lastLoginAt: string }>( 'SELECT lastLoginAt FROM users WHERE username = ?', ['testuser'] ); @@ -175,12 +172,12 @@ describe('AuthenticationService', () => { const passwordHash = await authService.hashPassword('TestPass123!'); const now = new Date().toISOString(); - await runQuery(db, ` + await db.execute(` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
`, [userId, 'testuser', 'test@example.com', passwordHash, 'Test', 'User', 1, 0, now, now]); - testUser = await getQuery(db, 'SELECT * FROM users WHERE username = ?', ['testuser']); + testUser = await db.queryOne('SELECT * FROM users WHERE username = ?', ['testuser']); }); it('should generate valid JWT access token', async () => { @@ -228,7 +225,7 @@ describe('AuthenticationService', () => { expect(payload.jti).toBeDefined(); expect(typeof payload.jti).toBe('string'); - expect(payload.jti.length).toBeGreaterThan(0); + expect(payload.jti!.length).toBeGreaterThan(0); }); }); @@ -241,12 +238,12 @@ describe('AuthenticationService', () => { const passwordHash = await authService.hashPassword('TestPass123!'); const now = new Date().toISOString(); - await runQuery(db, ` + await db.execute(` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) `, [userId, 'testuser', 'test@example.com', passwordHash, 'Test', 'User', 1, 0, now, now]); - testUser = await getQuery(db, 'SELECT * FROM users WHERE username = ?', ['testuser']); + testUser = await db.queryOne('SELECT * FROM users WHERE username = ?', ['testuser']); validToken = await authService.generateToken(testUser); }); @@ -284,12 +281,12 @@ describe('AuthenticationService', () => { const passwordHash = await authService.hashPassword('TestPass123!'); const now = new Date().toISOString(); - await runQuery(db, ` + await db.execute(` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
`, [userId, 'testuser', 'test@example.com', passwordHash, 'Test', 'User', 1, 0, now, now]); - testUser = await getQuery(db, 'SELECT * FROM users WHERE username = ?', ['testuser']); + testUser = await db.queryOne('SELECT * FROM users WHERE username = ?', ['testuser']); refreshToken = await authService.generateRefreshToken(testUser); }); @@ -319,7 +316,7 @@ describe('AuthenticationService', () => { }); it('should reject refresh token for inactive user', async () => { - await runQuery(db, 'UPDATE users SET isActive = 0 WHERE id = ?', [testUser.id]); + await db.execute('UPDATE users SET isActive = 0 WHERE id = ?', [testUser.id]); const result = await authService.refreshToken(refreshToken); expect(result.success).toBe(false); @@ -336,12 +333,12 @@ describe('AuthenticationService', () => { const passwordHash = await authService.hashPassword('TestPass123!'); const now = new Date().toISOString(); - await runQuery(db, ` + await db.execute(` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
`, [userId, 'testuser', 'test@example.com', passwordHash, 'Test', 'User', 1, 0, now, now]); - testUser = await getQuery(db, 'SELECT * FROM users WHERE username = ?', ['testuser']); + testUser = await db.queryOne('SELECT * FROM users WHERE username = ?', ['testuser']); token = await authService.generateToken(testUser); }); @@ -354,7 +351,7 @@ describe('AuthenticationService', () => { it('should store revoked token in database', async () => { await authService.revokeToken(token); - const revokedTokens = await allQuery(db, 'SELECT * FROM revoked_tokens'); + const revokedTokens = await db.query('SELECT * FROM revoked_tokens'); expect(revokedTokens.length).toBeGreaterThan(0); }); @@ -379,24 +376,24 @@ describe('AuthenticationService', () => { const now = new Date().toISOString(); // Create user - await runQuery(db, ` + await db.execute(` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) `, [userId, 'testuser', 'test@example.com', passwordHash, 'Test', 'User', 1, 0, now, now]); // Create role - await runQuery(db, ` + await db.execute(` INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?) `, [roleId, 'TestRole', 'Test role', 0, now, now]); // Assign role to user - await runQuery(db, ` + await db.execute(` INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?) `, [userId, roleId, now]); - testUser = await getQuery(db, 'SELECT * FROM users WHERE username = ?', ['testuser']); + testUser = await db.queryOne('SELECT * FROM users WHERE username = ?', ['testuser']); }); it('should include user roles in token payload', async () => { @@ -413,25 +410,25 @@ describe('AuthenticationService', () => { const now = new Date().toISOString(); // Create group - await runQuery(db, ` + await db.execute(` INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?) 
`, [groupId, 'TestGroup', 'Test group', now, now]); // Create role - await runQuery(db, ` + await db.execute(` INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?) `, [roleId, 'GroupRole', 'Group role', 0, now, now]); // Add user to group - await runQuery(db, ` + await db.execute(` INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?) `, [testUser.id, groupId, now]); // Assign role to group - await runQuery(db, ` + await db.execute(` INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?) `, [groupId, roleId, now]); @@ -445,36 +442,9 @@ describe('AuthenticationService', () => { }); // Helper functions -function runQuery(db: Database, sql: string, params: any[] = []): Promise { - return new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); -} - -function getQuery(db: Database, sql: string, params: any[] = []): Promise { - return new Promise((resolve, reject) => { - db.get(sql, params, (err, row) => { - if (err) reject(err); - else resolve(row as T || null); - }); - }); -} - -function allQuery(db: Database, sql: string, params: any[] = []): Promise { - return new Promise((resolve, reject) => { - db.all(sql, params, (err, rows) => { - if (err) reject(err); - else resolve(rows as T[] || []); - }); - }); -} - -async function initializeSchema(db: Database): Promise { - const schema = ` - CREATE TABLE users ( +async function initializeSchema(db: DatabaseAdapter): Promise { + const statements = [ + `CREATE TABLE users ( id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, email TEXT NOT NULL UNIQUE, @@ -486,82 +456,87 @@ async function initializeSchema(db: Database): Promise { createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL, lastLoginAt TEXT - ); - - CREATE TABLE groups ( + )`, + `CREATE TABLE groups ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL - 
); - - CREATE TABLE roles ( + )`, + `CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, isBuiltIn INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL - ); - - CREATE TABLE permissions ( + )`, + `CREATE TABLE permissions ( id TEXT PRIMARY KEY, resource TEXT NOT NULL, action TEXT NOT NULL, description TEXT NOT NULL, createdAt TEXT NOT NULL, UNIQUE(resource, action) - ); - - CREATE TABLE user_groups ( + )`, + `CREATE TABLE user_groups ( userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, groupId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE - ); - - CREATE TABLE user_roles ( + )`, + `CREATE TABLE user_roles ( userId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, roleId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE group_roles ( + )`, + `CREATE TABLE group_roles ( groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (groupId, roleId), FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE role_permissions ( + )`, + `CREATE TABLE role_permissions ( roleId TEXT NOT NULL, permissionId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (roleId, permissionId), FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE - ); - - CREATE TABLE revoked_tokens ( + )`, + `CREATE TABLE revoked_tokens ( token TEXT PRIMARY KEY, userId TEXT NOT NULL, revokedAt TEXT NOT NULL, expiresAt TEXT NOT NULL, FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE - ); - `; - - const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); + )`, + `CREATE 
TABLE account_lockouts ( + username TEXT PRIMARY KEY, + lockoutType TEXT NOT NULL, + lockedAt TEXT NOT NULL, + lockedUntil TEXT, + failedAttempts INTEGER NOT NULL DEFAULT 0, + lastAttemptAt TEXT + )`, + `CREATE TABLE failed_login_attempts ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + username TEXT NOT NULL, + attemptedAt TEXT NOT NULL, + ipAddress TEXT, + reason TEXT NOT NULL + )` + ]; for (const statement of statements) { - await runQuery(db, statement); + await db.execute(statement); } } diff --git a/backend/test/GroupService.test.ts b/backend/test/GroupService.test.ts index 2656d695..6e296bcf 100644 --- a/backend/test/GroupService.test.ts +++ b/backend/test/GroupService.test.ts @@ -1,15 +1,16 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../src/database/SQLiteAdapter'; import { GroupService } from '../src/services/GroupService'; import { randomUUID } from 'crypto'; describe('GroupService', () => { - let db: Database; + let db: SQLiteAdapter; let groupService: GroupService; beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -19,12 +20,7 @@ describe('GroupService', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); describe('createGroup', () => { @@ -854,27 +850,17 @@ describe('GroupService', () => { }); // Helper functions -function runQuery(db: Database, sql: string, params: any[] = []): Promise { - return new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); +function runQuery(db: SQLiteAdapter, sql: string, params: any[] = []): Promise { + return db.execute(sql, params).then(() => {}); } -function allQuery(db: Database, sql: 
string, params: any[] = []): Promise { - return new Promise((resolve, reject) => { - db.all(sql, params, (err, rows) => { - if (err) reject(err); - else resolve(rows as T[] || []); - }); - }); +function allQuery(db: SQLiteAdapter, sql: string, params: any[] = []): Promise { + return db.query(sql, params); } -async function initializeSchema(db: Database): Promise { +async function initializeSchema(db: SQLiteAdapter): Promise { // Enable foreign key constraints (required for CASCADE to work in SQLite) - await runQuery(db, 'PRAGMA foreign_keys = ON'); + // foreign_keys already enabled by SQLiteAdapter.initialize() const schema = ` CREATE TABLE users ( diff --git a/backend/test/RoleService.test.ts b/backend/test/RoleService.test.ts index 6e04aa9f..4f9a710d 100644 --- a/backend/test/RoleService.test.ts +++ b/backend/test/RoleService.test.ts @@ -1,19 +1,18 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../src/database/SQLiteAdapter'; import { RoleService, CreateRoleDTO, UpdateRoleDTO } from '../src/services/RoleService'; describe('RoleService', () => { - let db: Database; + let db: SQLiteAdapter; let roleService: RoleService; beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Create schema - await new Promise((resolve, reject) => { - db.exec( - ` + const schema = ` CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, @@ -41,32 +40,35 @@ describe('RoleService', () => { FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE ); - -- Seed built-in roles INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES - ('role-viewer-001', 'Viewer', 'Read-only access', 1, datetime('now'), datetime('now')), - ('role-operator-001', 'Operator', 'Read and execute access', 1, datetime('now'), datetime('now')), + ('role-viewer-001', 
'Viewer', 'Read-only access', 1, datetime('now'), datetime('now')); + + INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES + ('role-operator-001', 'Operator', 'Read and execute access', 1, datetime('now'), datetime('now')); + + INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES ('role-admin-001', 'Administrator', 'Full access', 1, datetime('now'), datetime('now')); - -- Seed permissions INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES - ('perm-ansible-read', 'ansible', 'read', 'View Ansible resources', datetime('now')), - ('perm-ansible-write', 'ansible', 'write', 'Modify Ansible resources', datetime('now')), + ('perm-ansible-read', 'ansible', 'read', 'View Ansible resources', datetime('now')); + + INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES + ('perm-ansible-write', 'ansible', 'write', 'Modify Ansible resources', datetime('now')); + + INSERT INTO permissions (id, resource, "action", description, createdAt) VALUES ('perm-bolt-read', 'bolt', 'read', 'View Bolt resources', datetime('now')); - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + `; + + const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); + for (const statement of statements) { + await db.execute(statement); + } roleService = new RoleService(db); }); afterEach(async () => { - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); }); describe('createRole', () => { diff --git a/backend/test/UserService.password.test.ts b/backend/test/UserService.password.test.ts index e036b3aa..78800890 100644 --- a/backend/test/UserService.password.test.ts +++ b/backend/test/UserService.password.test.ts @@ -1,17 +1,18 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../src/database/SQLiteAdapter'; import { UserService 
} from '../src/services/UserService'; import { AuthenticationService } from '../src/services/AuthenticationService'; describe('UserService - Password Validation Integration', () => { - let db: Database; + let db: SQLiteAdapter; let userService: UserService; let authService: AuthenticationService; const testJwtSecret = 'test-secret-key-for-testing-only'; // pragma: allowlist secret beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -21,10 +22,7 @@ describe('UserService - Password Validation Integration', () => { }); afterEach(async () => { - // Close database - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); }); describe('createUser - password validation', () => { @@ -185,7 +183,7 @@ describe('UserService - Password Validation Integration', () => { // Helper function to initialize database schema -async function initializeSchema(db: Database): Promise { +async function initializeSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE users ( id TEXT PRIMARY KEY, @@ -280,12 +278,13 @@ async function initializeSchema(db: Database): Promise { INSERT INTO config (key, value, updatedAt) VALUES ('allow_self_registration', 'false', datetime('now')), ('default_new_user_role', 'role-viewer-001', datetime('now')); + + INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES + ('role-viewer-001', 'Viewer', 'Default viewer role', 1, datetime('now'), datetime('now')); `; - return new Promise((resolve, reject) => { - db.exec(schema, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); + for (const statement of statements) { + await db.execute(statement); + } } diff --git a/backend/test/UserService.test.ts b/backend/test/UserService.test.ts index 
39e10e31..83427e9e 100644 --- a/backend/test/UserService.test.ts +++ b/backend/test/UserService.test.ts @@ -1,17 +1,19 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../src/database/DatabaseAdapter'; import { UserService, CreateUserDTO, UpdateUserDTO } from '../src/services/UserService'; import { AuthenticationService } from '../src/services/AuthenticationService'; import { randomUUID } from 'crypto'; describe('UserService', () => { - let db: Database; + let db: DatabaseAdapter; let userService: UserService; let authService: AuthenticationService; beforeEach(async () => { - // Create in-memory database - db = new Database(':memory:'); + // Create in-memory database via SQLiteAdapter + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -22,7 +24,7 @@ describe('UserService', () => { }); afterEach(async () => { - await closeDatabase(db); + await db.close(); }); describe('createUser', () => { @@ -496,11 +498,15 @@ describe('UserService', () => { const roleId = await createTestRole(db, 'Test Role'); + // User already has default Viewer role from createUser + const rolesBefore = await userService.getUserRoles(user.id); + const countBefore = rolesBefore.length; + await userService.assignRoleToUser(user.id, roleId); const roles = await userService.getUserRoles(user.id); - expect(roles.length).toBe(1); - expect(roles[0].name).toBe('Test Role'); + expect(roles.length).toBe(countBefore + 1); + expect(roles.map(r => r.name)).toContain('Test Role'); }); it('should throw error if user not found', async () => { @@ -556,13 +562,17 @@ describe('UserService', () => { const roleId = await createTestRole(db, 'Test Role'); + // User already has default Viewer role from createUser + const rolesBefore = await userService.getUserRoles(user.id); + const countBefore 
= rolesBefore.length; + await userService.assignRoleToUser(user.id, roleId); let roles = await userService.getUserRoles(user.id); - expect(roles.length).toBe(1); + expect(roles.length).toBe(countBefore + 1); await userService.removeRoleFromUser(user.id, roleId); roles = await userService.getUserRoles(user.id); - expect(roles.length).toBe(0); + expect(roles.length).toBe(countBefore); }); it('should throw error if role not assigned', async () => { @@ -647,119 +657,120 @@ describe('UserService', () => { // Helper functions -async function initializeSchema(db: Database): Promise { - return new Promise((resolve, reject) => { - db.exec(` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT UNIQUE NOT NULL, - email TEXT UNIQUE NOT NULL, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER DEFAULT 1, - isAdmin INTEGER DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - description TEXT, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - description TEXT, - isBuiltIn INTEGER DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id), - FOREIGN KEY (groupId) REFERENCES groups(id) - ); - - CREATE TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id), - FOREIGN KEY (roleId) REFERENCES roles(id) - ); - - CREATE TABLE revoked_tokens ( - token TEXT PRIMARY KEY, - userId TEXT NOT NULL, - revokedAt TEXT NOT NULL, - expiresAt TEXT NOT NULL - ); - - CREATE TABLE config ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL, - 
updatedAt TEXT NOT NULL - ); - - -- Insert default config values - INSERT INTO config (key, value, updatedAt) VALUES - ('allow_self_registration', 'false', datetime('now')), - ('default_new_user_role', 'role-viewer-001', datetime('now')); - `, (err) => { - if (err) reject(err); - else resolve(); - }); - }); -} - -async function closeDatabase(db: Database): Promise { - return new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); +async function initializeSchema(db: DatabaseAdapter): Promise { + await db.execute(` + CREATE TABLE users ( + id TEXT PRIMARY KEY, + username TEXT UNIQUE NOT NULL, + email TEXT UNIQUE NOT NULL, + passwordHash TEXT NOT NULL, + firstName TEXT NOT NULL, + lastName TEXT NOT NULL, + isActive INTEGER DEFAULT 1, + isAdmin INTEGER DEFAULT 0, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL, + lastLoginAt TEXT + ) + `); + + await db.execute(` + CREATE TABLE groups ( + id TEXT PRIMARY KEY, + name TEXT UNIQUE NOT NULL, + description TEXT, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL + ) + `); + + await db.execute(` + CREATE TABLE roles ( + id TEXT PRIMARY KEY, + name TEXT UNIQUE NOT NULL, + description TEXT, + isBuiltIn INTEGER DEFAULT 0, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL + ) + `); + + await db.execute(` + CREATE TABLE user_groups ( + userId TEXT NOT NULL, + groupId TEXT NOT NULL, + assignedAt TEXT NOT NULL, + PRIMARY KEY (userId, groupId), + FOREIGN KEY (userId) REFERENCES users(id), + FOREIGN KEY (groupId) REFERENCES groups(id) + ) + `); + + await db.execute(` + CREATE TABLE user_roles ( + userId TEXT NOT NULL, + roleId TEXT NOT NULL, + assignedAt TEXT NOT NULL, + PRIMARY KEY (userId, roleId), + FOREIGN KEY (userId) REFERENCES users(id), + FOREIGN KEY (roleId) REFERENCES roles(id) + ) + `); + + await db.execute(` + CREATE TABLE revoked_tokens ( + token TEXT PRIMARY KEY, + userId TEXT NOT NULL, + revokedAt TEXT NOT NULL, + expiresAt TEXT NOT NULL + ) + `); + + 
await db.execute(` + CREATE TABLE config ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL, + updatedAt TEXT NOT NULL + ) + `); + + // Insert default config values + await db.execute( + `INSERT INTO config (key, value, updatedAt) VALUES (?, ?, datetime('now'))`, + ['allow_self_registration', 'false'] + ); + await db.execute( + `INSERT INTO config (key, value, updatedAt) VALUES (?, ?, datetime('now'))`, + ['default_new_user_role', 'role-viewer-001'] + ); + + // Seed the default viewer role so FK constraints are satisfied + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, datetime('now'), datetime('now'))`, + ['role-viewer-001', 'Viewer', 'Default viewer role', 1] + ); } -async function createTestGroup(db: Database, name: string): Promise { +async function createTestGroup(db: DatabaseAdapter, name: string): Promise { const groupId = randomUUID(); const now = new Date().toISOString(); - return new Promise((resolve, reject) => { - db.run( - 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', - [groupId, name, 'Test group description', now, now], - (err) => { - if (err) reject(err); - else resolve(groupId); - } - ); - }); + await db.execute( + 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', + [groupId, name, 'Test group description', now, now] + ); + + return groupId; } -async function createTestRole(db: Database, name: string): Promise { +async function createTestRole(db: DatabaseAdapter, name: string): Promise { const roleId = randomUUID(); const now = new Date().toISOString(); - return new Promise((resolve, reject) => { - db.run( - 'INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)', - [roleId, name, 'Test role description', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(roleId); - } - ); - }); + await db.execute( + 'INSERT INTO roles (id, name, 
description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)', + [roleId, name, 'Test role description', 0, now, now] + ); + + return roleId; } diff --git a/backend/test/config/ConfigService.test.ts b/backend/test/config/ConfigService.test.ts index 322a99a2..45e8e9db 100644 --- a/backend/test/config/ConfigService.test.ts +++ b/backend/test/config/ConfigService.test.ts @@ -156,4 +156,86 @@ describe("ConfigService - Integration Configuration", () => { expect(integrations.puppetdb?.enabled).toBe(true); }); }); + + describe("AWS Configuration", () => { + beforeEach(() => { + delete process.env.AWS_ENABLED; + delete process.env.AWS_ACCESS_KEY_ID; + delete process.env.AWS_SECRET_ACCESS_KEY; + delete process.env.AWS_DEFAULT_REGION; + delete process.env.AWS_SESSION_TOKEN; + delete process.env.AWS_PROFILE; + delete process.env.AWS_ENDPOINT; + }); + + it("should load AWS configuration when enabled", () => { + process.env.AWS_ENABLED = "true"; // pragma: allowlist secret + process.env.AWS_ACCESS_KEY_ID = "AKIAIOSFODNN7EXAMPLE"; // pragma: allowlist secret + process.env.AWS_SECRET_ACCESS_KEY = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; // pragma: allowlist secret + process.env.AWS_DEFAULT_REGION = "us-west-2"; // pragma: allowlist secret + + const configService = new ConfigService(); + const awsConfig = configService.getAWSConfig(); + + expect(awsConfig).not.toBeNull(); + expect(awsConfig?.enabled).toBe(true); + expect(awsConfig?.accessKeyId).toBe("AKIAIOSFODNN7EXAMPLE"); + expect(awsConfig?.secretAccessKey).toBe("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"); + expect(awsConfig?.region).toBe("us-west-2"); + }); + + it("should return null when AWS is not enabled", () => { + const configService = new ConfigService(); + const awsConfig = configService.getAWSConfig(); + + expect(awsConfig).toBeNull(); + }); + + it("should use default region when AWS_DEFAULT_REGION is not set", () => { + process.env.AWS_ENABLED = "true"; // pragma: allowlist secret + 
process.env.AWS_ACCESS_KEY_ID = "AKIAIOSFODNN7EXAMPLE"; // pragma: allowlist secret + process.env.AWS_SECRET_ACCESS_KEY = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; // pragma: allowlist secret + + const configService = new ConfigService(); + const awsConfig = configService.getAWSConfig(); + + expect(awsConfig).not.toBeNull(); + expect(awsConfig?.region).toBe("us-east-1"); + }); + + it("should include AWS in integrations config when enabled", () => { + process.env.AWS_ENABLED = "true"; // pragma: allowlist secret + process.env.AWS_ACCESS_KEY_ID = "AKIAIOSFODNN7EXAMPLE"; // pragma: allowlist secret + process.env.AWS_SECRET_ACCESS_KEY = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; // pragma: allowlist secret + + const configService = new ConfigService(); + const integrations = configService.getIntegrationsConfig(); + + expect(integrations.aws).toBeDefined(); + expect(integrations.aws?.enabled).toBe(true); + }); + + it("should not include AWS in integrations when disabled", () => { + const configService = new ConfigService(); + const integrations = configService.getIntegrationsConfig(); + + expect(integrations.aws).toBeUndefined(); + }); + + it("should parse optional AWS fields", () => { + process.env.AWS_ENABLED = "true"; // pragma: allowlist secret + process.env.AWS_ACCESS_KEY_ID = "AKIAIOSFODNN7EXAMPLE"; // pragma: allowlist secret + process.env.AWS_SECRET_ACCESS_KEY = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; // pragma: allowlist secret + process.env.AWS_SESSION_TOKEN = "FwoGZXIvYXdzEBYaDH"; // pragma: allowlist secret + process.env.AWS_PROFILE = "production"; // pragma: allowlist secret + process.env.AWS_ENDPOINT = "http://localhost:4566"; // pragma: allowlist secret + + const configService = new ConfigService(); + const awsConfig = configService.getAWSConfig(); + + expect(awsConfig?.sessionToken).toBe("FwoGZXIvYXdzEBYaDH"); + expect(awsConfig?.profile).toBe("production"); + expect(awsConfig?.endpoint).toBe("http://localhost:4566"); + }); + }); }); diff --git 
a/backend/test/database/006_add_batch_executions.test.ts b/backend/test/database/006_add_batch_executions.test.ts index a81d60a9..acab21e7 100644 --- a/backend/test/database/006_add_batch_executions.test.ts +++ b/backend/test/database/006_add_batch_executions.test.ts @@ -1,59 +1,84 @@ import { describe, it, expect, beforeEach, afterEach } from "vitest"; -import sqlite3 from "sqlite3"; -import { MigrationRunner } from "../../src/database/MigrationRunner"; -import { readFileSync } from "fs"; +import { SQLiteAdapter } from "../../src/database/SQLiteAdapter"; +import type { DatabaseAdapter } from "../../src/database/DatabaseAdapter"; +import { readFileSync, readdirSync } from "fs"; import { join } from "path"; +const migrationsDir = join(__dirname, "../../src/database/migrations"); + +/** + * Apply migrations from 000 up to (but not including) the given stopBefore id. + */ +async function applyMigrationsUpTo(db: DatabaseAdapter, stopBeforeId: string): Promise { + const files = readdirSync(migrationsDir) + .filter(f => f.endsWith(".sql") && !f.includes(".sqlite.") && !f.includes(".postgres.")) + .sort(); + + for (const file of files) { + const id = file.split("_")[0]; + if (id >= stopBeforeId) break; + + const sql = readFileSync(join(migrationsDir, file), "utf-8"); + const statements = sql + .split(";") + .map(s => s.trim()) + .filter(s => { + if (s.length === 0) return false; + const withoutComments = s + .split("\n") + .map(line => line.replace(/--.*$/, "").trim()) + .filter(line => line.length > 0) + .join("\n"); + return withoutComments.length > 0; + }); + for (const statement of statements) { + await db.execute(statement); + } + } +} + +async function applyMigration006(db: DatabaseAdapter): Promise { + const sql = readFileSync( + join(migrationsDir, "006_add_batch_executions.sql"), + "utf-8" + ); + const statements = sql + .split(";") + .map(s => s.trim()) + .filter(s => { + if (s.length === 0) return false; + const withoutComments = s + .split("\n") + .map(line => 
line.replace(/--.*$/, "").trim()) + .filter(line => line.length > 0) + .join("\n"); + return withoutComments.length > 0; + }); + for (const statement of statements) { + await db.execute(statement); + } +} + describe("Migration 006: Add batch executions", () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; beforeEach(async () => { - db = new sqlite3.Database(":memory:"); - - // Apply base schema first (executions table) - const baseSchema = readFileSync( - join(__dirname, "../../src/database/schema.sql"), - "utf-8" - ); - await new Promise((resolve, reject) => { - db.exec(baseSchema, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + db = new SQLiteAdapter(":memory:"); + await db.initialize(); + await applyMigrationsUpTo(db, "006"); }); afterEach(async () => { - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); }); it("should create batch_executions table with correct schema", async () => { - const migrationSQL = readFileSync( - join(__dirname, "../../src/database/migrations/006_add_batch_executions.sql"), - "utf-8" - ); - - await new Promise((resolve, reject) => { - db.exec(migrationSQL, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - // Verify batch_executions table exists - const tableInfo = await new Promise((resolve, reject) => { - db.all("PRAGMA table_info(batch_executions)", (err, rows) => { - if (err) reject(err); - else resolve(rows); - }); - }); + await applyMigration006(db); + const tableInfo = await db.query<{ name: string }>("PRAGMA table_info(batch_executions)"); expect(tableInfo.length).toBeGreaterThan(0); - // Verify required columns exist - const columnNames = tableInfo.map((col: any) => col.name); + const columnNames = tableInfo.map(col => col.name); expect(columnNames).toContain("id"); expect(columnNames).toContain("type"); expect(columnNames).toContain("action"); @@ -74,133 +99,50 @@ describe("Migration 006: Add batch executions", () => { }); it("should create 
indexes for batch_executions table", async () => { - const migrationSQL = readFileSync( - join(__dirname, "../../src/database/migrations/006_add_batch_executions.sql"), - "utf-8" - ); + await applyMigration006(db); - await new Promise((resolve, reject) => { - db.exec(migrationSQL, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - // Verify indexes exist - const indexes = await new Promise((resolve, reject) => { - db.all( - "SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='batch_executions'", - (err, rows) => { - if (err) reject(err); - else resolve(rows); - } - ); - }); - - const indexNames = indexes.map((idx: any) => idx.name); + const indexes = await db.query<{ name: string }>( + "SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='batch_executions'" + ); + const indexNames = indexes.map(idx => idx.name); expect(indexNames).toContain("idx_batch_executions_created"); expect(indexNames).toContain("idx_batch_executions_status"); expect(indexNames).toContain("idx_batch_executions_user"); }); it("should add batch_id and batch_position columns to executions table", async () => { - const migrationSQL = readFileSync( - join(__dirname, "../../src/database/migrations/006_add_batch_executions.sql"), - "utf-8" - ); - - await new Promise((resolve, reject) => { - db.exec(migrationSQL, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await applyMigration006(db); - // Verify executions table has new columns - const tableInfo = await new Promise((resolve, reject) => { - db.all("PRAGMA table_info(executions)", (err, rows) => { - if (err) reject(err); - else resolve(rows); - }); - }); - - const columnNames = tableInfo.map((col: any) => col.name); + const tableInfo = await db.query<{ name: string }>("PRAGMA table_info(executions)"); + const columnNames = tableInfo.map(col => col.name); expect(columnNames).toContain("batch_id"); expect(columnNames).toContain("batch_position"); }); it("should create index for batch 
queries on executions table", async () => { - const migrationSQL = readFileSync( - join(__dirname, "../../src/database/migrations/006_add_batch_executions.sql"), - "utf-8" - ); + await applyMigration006(db); - await new Promise((resolve, reject) => { - db.exec(migrationSQL, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - // Verify batch index exists - const indexes = await new Promise((resolve, reject) => { - db.all( - "SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='executions'", - (err, rows) => { - if (err) reject(err); - else resolve(rows); - } - ); - }); - - const indexNames = indexes.map((idx: any) => idx.name); + const indexes = await db.query<{ name: string }>( + "SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='executions'" + ); + const indexNames = indexes.map(idx => idx.name); expect(indexNames).toContain("idx_executions_batch"); }); it("should preserve existing data when migrating executions table", async () => { // Insert test data before migration - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO executions ( - id, type, target_nodes, action, status, started_at, results - ) VALUES (?, ?, ?, ?, ?, ?, ?)`, - [ - "test-exec-1", - "command", - '["node1"]', - "uptime", - "success", - "2024-01-01T00:00:00Z", - "[]" - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); - - // Apply migration - const migrationSQL = readFileSync( - join(__dirname, "../../src/database/migrations/006_add_batch_executions.sql"), - "utf-8" + await db.execute( + `INSERT INTO executions (id, type, target_nodes, action, status, started_at, results) + VALUES (?, ?, ?, ?, ?, ?, ?)`, + ["test-exec-1", "command", '["node1"]', "uptime", "success", "2024-01-01T00:00:00Z", "[]"] ); - await new Promise((resolve, reject) => { - db.exec(migrationSQL, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - // Verify data was preserved - const rows = await new Promise((resolve, reject) => { - 
db.all("SELECT * FROM executions WHERE id = ?", ["test-exec-1"], (err, rows) => { - if (err) reject(err); - else resolve(rows); - }); - }); + await applyMigration006(db); + const rows = await db.query>( + "SELECT * FROM executions WHERE id = ?", + ["test-exec-1"] + ); expect(rows.length).toBe(1); expect(rows[0].id).toBe("test-exec-1"); expect(rows[0].type).toBe("command"); @@ -210,57 +152,25 @@ describe("Migration 006: Add batch executions", () => { }); it("should allow inserting batch execution records", async () => { - const migrationSQL = readFileSync( - join(__dirname, "../../src/database/migrations/006_add_batch_executions.sql"), - "utf-8" + await applyMigration006(db); + + await db.execute( + `INSERT INTO batch_executions ( + id, type, action, target_nodes, target_groups, status, + created_at, user_id, execution_ids, + stats_total, stats_queued, stats_running, stats_success, stats_failed + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + [ + "batch-1", "command", "uptime", '["node1", "node2"]', '["group1"]', + "running", "2024-01-01T00:00:00Z", "user1", '["exec1", "exec2"]', + 2, 2, 0, 0, 0 + ] ); - await new Promise((resolve, reject) => { - db.exec(migrationSQL, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - // Insert a batch execution - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO batch_executions ( - id, type, action, target_nodes, target_groups, status, - created_at, user_id, execution_ids, - stats_total, stats_queued, stats_running, stats_success, stats_failed - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - "batch-1", - "command", - "uptime", - '["node1", "node2"]', - '["group1"]', - "running", - "2024-01-01T00:00:00Z", - "user1", - '["exec1", "exec2"]', - 2, - 2, - 0, - 0, - 0 - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); - - // Verify insertion - const rows = await new Promise((resolve, reject) => { - db.all("SELECT * FROM batch_executions WHERE id = ?", 
["batch-1"], (err, rows) => { - if (err) reject(err); - else resolve(rows); - }); - }); - + const rows = await db.query>( + "SELECT * FROM batch_executions WHERE id = ?", + ["batch-1"] + ); expect(rows.length).toBe(1); expect(rows[0].id).toBe("batch-1"); expect(rows[0].type).toBe("command"); @@ -268,45 +178,21 @@ describe("Migration 006: Add batch executions", () => { }); it("should enforce CHECK constraints on batch_executions", async () => { - const migrationSQL = readFileSync( - join(__dirname, "../../src/database/migrations/006_add_batch_executions.sql"), - "utf-8" - ); - - await new Promise((resolve, reject) => { - db.exec(migrationSQL, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await applyMigration006(db); - // Try to insert invalid type - const insertInvalidType = new Promise((resolve, reject) => { - db.run( + await expect( + db.execute( `INSERT INTO batch_executions ( id, type, action, target_nodes, target_groups, status, created_at, user_id, execution_ids, stats_total, stats_queued, stats_running, stats_success, stats_failed ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, [ - "batch-invalid", - "invalid_type", - "uptime", - "[]", - "[]", - "running", - "2024-01-01T00:00:00Z", - "user1", - "[]", + "batch-invalid", "invalid_type", "uptime", "[]", "[]", + "running", "2024-01-01T00:00:00Z", "user1", "[]", 0, 0, 0, 0, 0 - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); - - await expect(insertInvalidType).rejects.toThrow(); + ] + ) + ).rejects.toThrow(); }); }); diff --git a/backend/test/database/AdapterFactory.test.ts b/backend/test/database/AdapterFactory.test.ts new file mode 100644 index 00000000..b125979c --- /dev/null +++ b/backend/test/database/AdapterFactory.test.ts @@ -0,0 +1,73 @@ +import { describe, it, expect, beforeEach, afterEach } from "vitest"; +import { DatabaseQueryError, DatabaseConnectionError } from "../../src/database/errors"; + +describe("AdapterFactory", () => { + const originalEnv = 
process.env; + + beforeEach(() => { + process.env = { ...originalEnv }; + }); + + afterEach(() => { + process.env = originalEnv; + }); + + it("returns SQLiteAdapter when DB_TYPE is unset", async () => { + delete process.env.DB_TYPE; + const { createDatabaseAdapter } = await import("../../src/database/AdapterFactory"); + const adapter = await createDatabaseAdapter({ databasePath: ":memory:" }); + expect(adapter.getDialect()).toBe("sqlite"); + expect(adapter.getPlaceholder(1)).toBe("?"); + }); + + it("returns SQLiteAdapter when DB_TYPE is 'sqlite'", async () => { + process.env.DB_TYPE = "sqlite"; + const { createDatabaseAdapter } = await import("../../src/database/AdapterFactory"); + const adapter = await createDatabaseAdapter({ databasePath: ":memory:" }); + expect(adapter.getDialect()).toBe("sqlite"); + }); + + it("throws when DB_TYPE is 'postgres' without DATABASE_URL", async () => { + process.env.DB_TYPE = "postgres"; + delete process.env.DATABASE_URL; + const { createDatabaseAdapter } = await import("../../src/database/AdapterFactory"); + await expect(createDatabaseAdapter({ databasePath: "" })).rejects.toThrow( + "DATABASE_URL environment variable is required" + ); + }); + + it("returns PostgresAdapter when DB_TYPE is 'postgres' with DATABASE_URL", async () => { + process.env.DB_TYPE = "postgres"; + process.env.DATABASE_URL = "postgres://localhost/test"; + const { createDatabaseAdapter } = await import("../../src/database/AdapterFactory"); + const adapter = await createDatabaseAdapter({ databasePath: "" }); + expect(adapter.getDialect()).toBe("postgres"); + expect(adapter.getPlaceholder(3)).toBe("$3"); + }); +}); + +describe("DatabaseQueryError", () => { + it("captures query and params context", () => { + const err = new DatabaseQueryError("fail", "SELECT 1", [42]); + expect(err.message).toBe("fail"); + expect(err.name).toBe("DatabaseQueryError"); + expect(err.query).toBe("SELECT 1"); + expect(err.params).toEqual([42]); + expect(err).toBeInstanceOf(Error); + 
}); + + it("works without params", () => { + const err = new DatabaseQueryError("fail", "SELECT 1"); + expect(err.params).toBeUndefined(); + }); +}); + +describe("DatabaseConnectionError", () => { + it("captures connection details", () => { + const err = new DatabaseConnectionError("timeout", "localhost:5432"); + expect(err.message).toBe("timeout"); + expect(err.name).toBe("DatabaseConnectionError"); + expect(err.connectionDetails).toBe("localhost:5432"); + expect(err).toBeInstanceOf(Error); + }); +}); diff --git a/backend/test/database/ExecutionRepository.test.ts b/backend/test/database/ExecutionRepository.test.ts index 52ee0222..97c135b6 100644 --- a/backend/test/database/ExecutionRepository.test.ts +++ b/backend/test/database/ExecutionRepository.test.ts @@ -1,5 +1,6 @@ import { describe, it, expect, beforeEach, afterEach } from "vitest"; -import sqlite3 from "sqlite3"; +import { SQLiteAdapter } from "../../src/database/SQLiteAdapter"; +import type { DatabaseAdapter } from "../../src/database/DatabaseAdapter"; import { ExecutionRepository, ExecutionType, @@ -7,15 +8,16 @@ import { } from "../../src/database/ExecutionRepository"; describe("ExecutionRepository", () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let repository: ExecutionRepository; beforeEach(async () => { - // Create in-memory database for testing - db = new sqlite3.Database(":memory:"); + // Create in-memory database via SQLiteAdapter + db = new SQLiteAdapter(":memory:"); + await db.initialize(); // Create executions table - const schema = ` + await db.execute(` CREATE TABLE executions ( id TEXT PRIMARY KEY, type TEXT NOT NULL, @@ -37,22 +39,13 @@ describe("ExecutionRepository", () => { batch_id TEXT, batch_position INTEGER ) - `; - - await new Promise((resolve, reject) => { - db.run(schema, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + `, []); repository = new ExecutionRepository(db); }); afterEach(async () => { - await new Promise((resolve) => { - db.close(() => 
resolve()); - }); + await db.close(); }); describe("constructor", () => { diff --git a/backend/test/database/MigrationRunner.test.ts b/backend/test/database/MigrationRunner.test.ts index e096b1f2..fa39c7ed 100644 --- a/backend/test/database/MigrationRunner.test.ts +++ b/backend/test/database/MigrationRunner.test.ts @@ -1,18 +1,17 @@ import { describe, it, expect, beforeEach, afterEach } from "vitest"; -import sqlite3 from "sqlite3"; import { MigrationRunner } from "../../src/database/MigrationRunner"; +import { SQLiteAdapter } from "../../src/database/SQLiteAdapter"; import { mkdirSync, writeFileSync, rmSync, existsSync } from "fs"; import { join } from "path"; describe("MigrationRunner", () => { - let db: sqlite3.Database; + let db: SQLiteAdapter; let testMigrationsDir: string; beforeEach(async () => { - // Create in-memory database for testing - db = new sqlite3.Database(":memory:"); + db = new SQLiteAdapter(":memory:"); + await db.initialize(); - // Create temporary migrations directory testMigrationsDir = join(__dirname, "test-migrations"); if (existsSync(testMigrationsDir)) { rmSync(testMigrationsDir, { recursive: true }); @@ -21,40 +20,24 @@ describe("MigrationRunner", () => { }); afterEach(async () => { - // Close database - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); - - // Clean up test migrations directory + await db.close(); + if (existsSync(testMigrationsDir)) { rmSync(testMigrationsDir, { recursive: true }); } }); - it("should initialize migrations table", async () => { + it("creates migrations table on first run", async () => { const runner = new MigrationRunner(db, testMigrationsDir); await runner.runPendingMigrations(); - // Check that migrations table exists - const result = await new Promise<{ count: number }>((resolve, reject) => { - db.get( - "SELECT COUNT(*) as count FROM sqlite_master WHERE type='table' AND name='migrations'", - (err, row: { count: number }) => { - if 
(err) reject(err); - else resolve(row); - } - ); - }); - - expect(result.count).toBe(1); + const result = await db.queryOne<{ count: number }>( + "SELECT COUNT(*) as count FROM sqlite_master WHERE type='table' AND name='migrations'" + ); + expect(result?.count).toBe(1); }); - it("should run pending migrations in order", async () => { - // Create test migration files + it("applies pending migrations in order", async () => { writeFileSync( join(testMigrationsDir, "001_create_users.sql"), "CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT)" @@ -66,49 +49,27 @@ describe("MigrationRunner", () => { const runner = new MigrationRunner(db, testMigrationsDir); const appliedCount = await runner.runPendingMigrations(); - expect(appliedCount).toBe(2); - // Verify migrations were recorded - const migrations = await new Promise>( - (resolve, reject) => { - db.all( - "SELECT id, name FROM migrations ORDER BY id", - (err, rows: Array<{ id: string; name: string }>) => { - if (err) reject(err); - else resolve(rows); - } - ); - } + const migrations = await db.query<{ id: string; name: string }>( + "SELECT id, name FROM migrations ORDER BY id" ); - expect(migrations).toHaveLength(2); expect(migrations[0].id).toBe("001"); expect(migrations[0].name).toBe("001_create_users.sql"); expect(migrations[1].id).toBe("002"); expect(migrations[1].name).toBe("002_add_email.sql"); - // Verify table was created and column added - const tableInfo = await new Promise>( - (resolve, reject) => { - db.all( - "PRAGMA table_info(users)", - (err, rows: Array<{ name: string }>) => { - if (err) reject(err); - else resolve(rows); - } - ); - } + const tableInfo = await db.query<{ name: string }>( + "PRAGMA table_info(users)" ); - const columnNames = tableInfo.map((col) => col.name); expect(columnNames).toContain("id"); expect(columnNames).toContain("name"); expect(columnNames).toContain("email"); }); - it("should not re-run already applied migrations", async () => { - // Create test migration + it("skips 
already-applied migrations", async () => { writeFileSync( join(testMigrationsDir, "001_create_users.sql"), "CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT)" @@ -116,23 +77,113 @@ describe("MigrationRunner", () => { const runner = new MigrationRunner(db, testMigrationsDir); - // Run migrations first time const firstRun = await runner.runPendingMigrations(); expect(firstRun).toBe(1); - // Run migrations second time const secondRun = await runner.runPendingMigrations(); expect(secondRun).toBe(0); }); - it("should handle multi-statement migrations", async () => { - // Create migration with multiple statements + it("selects dialect-specific files over shared files", async () => { + // Shared file + writeFileSync( + join(testMigrationsDir, "001_create_users.sql"), + "CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT DEFAULT 'shared')" + ); + // SQLite-specific file (should win since our adapter is sqlite) + writeFileSync( + join(testMigrationsDir, "001_create_users.sqlite.sql"), + "CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT DEFAULT 'sqlite')" + ); + + const runner = new MigrationRunner(db, testMigrationsDir); + await runner.runPendingMigrations(); + + // Verify the sqlite-specific migration was used + const migrations = await db.query<{ name: string }>( + "SELECT name FROM migrations" + ); + expect(migrations[0].name).toBe("001_create_users.sqlite.sql"); + + // Verify the default value proves the sqlite variant ran + await db.execute("INSERT INTO users (id) VALUES ('test1')"); + const user = await db.queryOne<{ name: string }>( + "SELECT name FROM users WHERE id = 'test1'" + ); + expect(user?.name).toBe("sqlite"); + }); + + it("ignores files for the wrong dialect", async () => { + // Only a postgres-specific file — should be skipped on sqlite + writeFileSync( + join(testMigrationsDir, "001_pg_only.postgres.sql"), + "CREATE TABLE pg_table (id TEXT PRIMARY KEY)" + ); + // A shared file for a different migration + writeFileSync( + join(testMigrationsDir, 
"002_shared.sql"), + "CREATE TABLE shared_table (id TEXT PRIMARY KEY)" + ); + + const runner = new MigrationRunner(db, testMigrationsDir); + const appliedCount = await runner.runPendingMigrations(); + + // Only the shared migration should have been applied + expect(appliedCount).toBe(1); + + const migrations = await db.query<{ id: string; name: string }>( + "SELECT id, name FROM migrations ORDER BY id" + ); + expect(migrations).toHaveLength(1); + expect(migrations[0].id).toBe("002"); + expect(migrations[0].name).toBe("002_shared.sql"); + + // pg_table should not exist + const tables = await db.query<{ name: string }>( + "SELECT name FROM sqlite_master WHERE type='table' AND name='pg_table'" + ); + expect(tables).toHaveLength(0); + }); + + it("running migrations multiple times is idempotent", async () => { + writeFileSync( + join(testMigrationsDir, "001_create_users.sql"), + "CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT)" + ); + writeFileSync( + join(testMigrationsDir, "002_create_posts.sql"), + "CREATE TABLE posts (id TEXT PRIMARY KEY, title TEXT)" + ); + + const runner = new MigrationRunner(db, testMigrationsDir); + + // Run three times + const first = await runner.runPendingMigrations(); + const second = await runner.runPendingMigrations(); + const third = await runner.runPendingMigrations(); + + expect(first).toBe(2); + expect(second).toBe(0); + expect(third).toBe(0); + + // Schema state is the same + const migrations = await db.query<{ id: string }>( + "SELECT id FROM migrations ORDER BY id" + ); + expect(migrations).toHaveLength(2); + + const tables = await db.query<{ name: string }>( + "SELECT name FROM sqlite_master WHERE type='table' AND name IN ('users', 'posts') ORDER BY name" + ); + expect(tables).toHaveLength(2); + }); + + it("handles multi-statement migrations", async () => { const multiStatementSQL = ` CREATE TABLE users (id TEXT PRIMARY KEY); CREATE TABLE posts (id TEXT PRIMARY KEY, userId TEXT); CREATE INDEX idx_posts_user ON posts(userId); `; 
- writeFileSync( join(testMigrationsDir, "001_multi_statement.sql"), multiStatementSQL @@ -141,26 +192,13 @@ describe("MigrationRunner", () => { const runner = new MigrationRunner(db, testMigrationsDir); await runner.runPendingMigrations(); - // Verify all tables were created - const tables = await new Promise>( - (resolve, reject) => { - db.all( - "SELECT name FROM sqlite_master WHERE type='table' AND name IN ('users', 'posts')", - (err, rows: Array<{ name: string }>) => { - if (err) reject(err); - else resolve(rows); - } - ); - } + const tables = await db.query<{ name: string }>( + "SELECT name FROM sqlite_master WHERE type='table' AND name IN ('users', 'posts')" ); - expect(tables).toHaveLength(2); - expect(tables.map((t) => t.name)).toContain("users"); - expect(tables.map((t) => t.name)).toContain("posts"); }); - it("should get migration status", async () => { - // Create test migrations + it("gets migration status", async () => { writeFileSync( join(testMigrationsDir, "001_create_users.sql"), "CREATE TABLE users (id TEXT PRIMARY KEY)" @@ -171,73 +209,54 @@ describe("MigrationRunner", () => { ); const runner = new MigrationRunner(db, testMigrationsDir); - - // Apply first migration only await runner.runPendingMigrations(); - // Add a new migration file + // Add a new migration file after initial run writeFileSync( join(testMigrationsDir, "003_create_comments.sql"), "CREATE TABLE comments (id TEXT PRIMARY KEY)" ); - // Get status const status = await runner.getStatus(); - expect(status.applied).toHaveLength(2); expect(status.pending).toHaveLength(1); expect(status.pending[0].id).toBe("003"); }); - it("should handle empty migrations directory", async () => { + it("handles empty migrations directory", async () => { const runner = new MigrationRunner(db, testMigrationsDir); const appliedCount = await runner.runPendingMigrations(); - expect(appliedCount).toBe(0); }); - it("should reject invalid migration filename format", async () => { - // Create migration with 
invalid filename + it("rejects invalid migration filename format", async () => { writeFileSync( join(testMigrationsDir, "invalid_migration.sql"), "CREATE TABLE test (id TEXT)" ); const runner = new MigrationRunner(db, testMigrationsDir); - await expect(runner.runPendingMigrations()).rejects.toThrow( "Invalid migration filename format" ); }); - it("should handle migration failure gracefully", async () => { - // Create migration with invalid SQL + it("handles migration failure gracefully", async () => { writeFileSync( join(testMigrationsDir, "001_invalid.sql"), "INVALID SQL STATEMENT" ); const runner = new MigrationRunner(db, testMigrationsDir); - await expect(runner.runPendingMigrations()).rejects.toThrow(); - // Verify migration was not recorded as applied - const migrations = await new Promise>( - (resolve, reject) => { - db.all( - "SELECT id FROM migrations", - (err, rows: Array<{ id: string }>) => { - if (err) reject(err); - else resolve(rows); - } - ); - } + const migrations = await db.query<{ id: string }>( + "SELECT id FROM migrations" ); - expect(migrations).toHaveLength(0); }); - it("should ignore SQL comments in migrations", async () => { + it("ignores SQL comments in migrations", async () => { const sqlWithComments = ` -- This is a comment CREATE TABLE users ( @@ -247,7 +266,6 @@ describe("MigrationRunner", () => { ); -- Final comment `; - writeFileSync( join(testMigrationsDir, "001_with_comments.sql"), sqlWithComments @@ -256,17 +274,9 @@ describe("MigrationRunner", () => { const runner = new MigrationRunner(db, testMigrationsDir); await runner.runPendingMigrations(); - // Verify table was created - const result = await new Promise<{ count: number }>((resolve, reject) => { - db.get( - "SELECT COUNT(*) as count FROM sqlite_master WHERE type='table' AND name='users'", - (err, row: { count: number }) => { - if (err) reject(err); - else resolve(row); - } - ); - }); - - expect(result.count).toBe(1); + const result = await db.queryOne<{ count: number }>( + 
"SELECT COUNT(*) as count FROM sqlite_master WHERE type='table' AND name='users'" + ); + expect(result?.count).toBe(1); }); }); diff --git a/backend/test/database/PostgresAdapter.test.ts b/backend/test/database/PostgresAdapter.test.ts new file mode 100644 index 00000000..dfa22c50 --- /dev/null +++ b/backend/test/database/PostgresAdapter.test.ts @@ -0,0 +1,58 @@ +import { describe, it, expect } from "vitest"; +import { PostgresAdapter } from "../../src/database/PostgresAdapter"; +import { DatabaseConnectionError } from "../../src/database/errors"; + +describe("PostgresAdapter", () => { + describe("instantiation", () => { + it("can be instantiated with a connection URL", () => { + const adapter = new PostgresAdapter("postgresql://localhost:5432/test"); + expect(adapter).toBeInstanceOf(PostgresAdapter); + }); + + it("reports not connected before initialize", () => { + const adapter = new PostgresAdapter("postgresql://localhost:5432/test"); + expect(adapter.isConnected()).toBe(false); + }); + }); + + describe("getDialect", () => { + it("returns postgres", () => { + const adapter = new PostgresAdapter("postgresql://localhost:5432/test"); + expect(adapter.getDialect()).toBe("postgres"); + }); + }); + + describe("getPlaceholder", () => { + it("returns $1 for index 1", () => { + const adapter = new PostgresAdapter("postgresql://localhost:5432/test"); + expect(adapter.getPlaceholder(1)).toBe("$1"); + }); + + it("returns $2 for index 2", () => { + const adapter = new PostgresAdapter("postgresql://localhost:5432/test"); + expect(adapter.getPlaceholder(2)).toBe("$2"); + }); + + it("returns $99 for index 99", () => { + const adapter = new PostgresAdapter("postgresql://localhost:5432/test"); + expect(adapter.getPlaceholder(99)).toBe("$99"); + }); + + it("returns $0 for index 0", () => { + const adapter = new PostgresAdapter("postgresql://localhost:5432/test"); + expect(adapter.getPlaceholder(0)).toBe("$0"); + }); + }); + + describe("initialize", () => { + it("throws 
DatabaseConnectionError when server is unreachable", async () => { + const adapter = new PostgresAdapter( + "postgresql://localhost:59999/nonexistent_db", + ); + await expect(adapter.initialize()).rejects.toThrow( + DatabaseConnectionError, + ); + expect(adapter.isConnected()).toBe(false); + }); + }); +}); diff --git a/backend/test/database/SQLiteAdapter.test.ts b/backend/test/database/SQLiteAdapter.test.ts new file mode 100644 index 00000000..90e385fd --- /dev/null +++ b/backend/test/database/SQLiteAdapter.test.ts @@ -0,0 +1,226 @@ +import { describe, it, expect, beforeEach, afterEach } from "vitest"; +import { SQLiteAdapter } from "../../src/database/SQLiteAdapter"; +import { DatabaseQueryError, DatabaseConnectionError } from "../../src/database/errors"; + +describe("SQLiteAdapter", () => { + let adapter: SQLiteAdapter; + + beforeEach(async () => { + adapter = new SQLiteAdapter(":memory:"); + await adapter.initialize(); + }); + + afterEach(async () => { + if (adapter.isConnected()) { + await adapter.close(); + } + }); + + describe("initialize", () => { + it("sets connected to true after init", () => { + expect(adapter.isConnected()).toBe(true); + }); + + it("enables WAL mode (memory db falls back to memory journal)", async () => { + // :memory: databases cannot use WAL, SQLite silently uses "memory" instead. + // WAL mode is correctly enabled for file-based databases. 
+ const row = await adapter.queryOne<{ journal_mode: string }>( + "PRAGMA journal_mode;", + ); + expect(row?.journal_mode).toBe("memory"); + }); + + it("enables foreign keys", async () => { + const row = await adapter.queryOne<{ foreign_keys: number }>( + "PRAGMA foreign_keys;", + ); + expect(row?.foreign_keys).toBe(1); + }); + }); + + describe("close", () => { + it("sets connected to false", async () => { + await adapter.close(); + expect(adapter.isConnected()).toBe(false); + }); + + it("is safe to call when already closed", async () => { + await adapter.close(); + await adapter.close(); // should not throw + }); + }); + + describe("getDialect / getPlaceholder", () => { + it("returns sqlite dialect", () => { + expect(adapter.getDialect()).toBe("sqlite"); + }); + + it("returns ? for any index", () => { + expect(adapter.getPlaceholder(1)).toBe("?"); + expect(adapter.getPlaceholder(99)).toBe("?"); + }); + }); + + describe("query", () => { + it("returns rows from a SELECT", async () => { + await adapter.execute( + "CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)", + ); + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["alice"]); + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["bob"]); + + const rows = await adapter.query<{ id: number; name: string }>( + "SELECT * FROM t ORDER BY id", + ); + expect(rows).toHaveLength(2); + expect(rows[0].name).toBe("alice"); + expect(rows[1].name).toBe("bob"); + }); + + it("returns empty array for no matches", async () => { + await adapter.execute("CREATE TABLE t (id INTEGER PRIMARY KEY)"); + const rows = await adapter.query("SELECT * FROM t"); + expect(rows).toEqual([]); + }); + + it("throws DatabaseQueryError on invalid SQL", async () => { + await expect(adapter.query("SELECT * FROM nonexistent")).rejects.toThrow( + DatabaseQueryError, + ); + }); + }); + + describe("queryOne", () => { + it("returns a single row", async () => { + await adapter.execute( + "CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)", + 
); + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["alice"]); + + const row = await adapter.queryOne<{ name: string }>( + "SELECT name FROM t WHERE id = 1", + ); + expect(row?.name).toBe("alice"); + }); + + it("returns null when no match", async () => { + await adapter.execute("CREATE TABLE t (id INTEGER PRIMARY KEY)"); + const row = await adapter.queryOne("SELECT * FROM t WHERE id = 999"); + expect(row).toBeNull(); + }); + + it("throws DatabaseQueryError on invalid SQL", async () => { + await expect( + adapter.queryOne("SELECT * FROM nonexistent"), + ).rejects.toThrow(DatabaseQueryError); + }); + }); + + describe("execute", () => { + it("returns changes count for INSERT", async () => { + await adapter.execute( + "CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)", + ); + const result = await adapter.execute("INSERT INTO t (name) VALUES (?)", [ + "alice", + ]); + expect(result.changes).toBe(1); + }); + + it("returns changes count for UPDATE", async () => { + await adapter.execute( + "CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)", + ); + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["alice"]); + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["bob"]); + + const result = await adapter.execute("UPDATE t SET name = ?", [ + "charlie", + ]); + expect(result.changes).toBe(2); + }); + + it("throws DatabaseQueryError on invalid SQL", async () => { + await expect( + adapter.execute("INSERT INTO nonexistent VALUES (1)"), + ).rejects.toThrow(DatabaseQueryError); + }); + }); + + describe("transactions", () => { + beforeEach(async () => { + await adapter.execute( + "CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)", + ); + }); + + it("commits on success", async () => { + await adapter.beginTransaction(); + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["alice"]); + await adapter.commit(); + + const rows = await adapter.query("SELECT * FROM t"); + expect(rows).toHaveLength(1); + }); + + it("rolls back on rollback()", async () 
=> { + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["before"]); + await adapter.beginTransaction(); + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["during"]); + await adapter.rollback(); + + const rows = await adapter.query("SELECT * FROM t"); + expect(rows).toHaveLength(1); + }); + + it("throws on nested beginTransaction", async () => { + await adapter.beginTransaction(); + await expect(adapter.beginTransaction()).rejects.toThrow( + "Nested transactions are not supported in SQLite", + ); + await adapter.rollback(); + }); + + it("withTransaction commits on success", async () => { + const result = await adapter.withTransaction(async () => { + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["alice"]); + return "done"; + }); + + expect(result).toBe("done"); + const rows = await adapter.query("SELECT * FROM t"); + expect(rows).toHaveLength(1); + }); + + it("withTransaction rolls back on error", async () => { + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["before"]); + + await expect( + adapter.withTransaction(async () => { + await adapter.execute("INSERT INTO t (name) VALUES (?)", ["during"]); + throw new Error("boom"); + }), + ).rejects.toThrow("boom"); + + const rows = await adapter.query("SELECT * FROM t"); + expect(rows).toHaveLength(1); + }); + }); + + describe("not connected", () => { + it("throws on query when not connected", async () => { + const fresh = new SQLiteAdapter(":memory:"); + expect(() => fresh.query("SELECT 1")).toThrow(DatabaseQueryError); + }); + + it("throws on queryOne when not connected", async () => { + const fresh = new SQLiteAdapter(":memory:"); + expect(() => fresh.queryOne("SELECT 1")).toThrow(DatabaseQueryError); + }); + + it("throws on execute when not connected", async () => { + const fresh = new SQLiteAdapter(":memory:"); + expect(() => fresh.execute("SELECT 1")).toThrow(DatabaseQueryError); + }); + }); +}); diff --git a/backend/test/database/index-verification.test.ts 
b/backend/test/database/index-verification.test.ts index 0efed02c..ac754518 100644 --- a/backend/test/database/index-verification.test.ts +++ b/backend/test/database/index-verification.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeAll, afterAll } from 'vitest'; -import sqlite3 from 'sqlite3'; +import type { DatabaseAdapter } from '../../src/database/DatabaseAdapter'; import { DatabaseService } from '../../src/database/DatabaseService'; import { mkdtempSync, rmSync } from 'fs'; import { tmpdir } from 'os'; @@ -7,7 +7,7 @@ import { join } from 'path'; describe('Database Index Verification', () => { let databaseService: DatabaseService; - let db: sqlite3.Database; + let db: DatabaseAdapter; let tempDir: string; beforeAll(async () => { @@ -27,15 +27,7 @@ describe('Database Index Verification', () => { }); it('should have all required indexes created', async () => { - const indexes = await new Promise((resolve, reject) => { - db.all( - "SELECT name, tbl_name FROM sqlite_master WHERE type='index' AND name LIKE 'idx_%'", - (err, rows) => { - if (err) reject(err); - else resolve(rows); - } - ); - }); + const indexes = await db.query("SELECT name, tbl_name FROM sqlite_master WHERE type='index' AND name LIKE 'idx_%'"); const indexNames = indexes.map((idx) => idx.name); @@ -56,12 +48,6 @@ describe('Database Index Verification', () => { 'idx_role_permissions_role', 'idx_role_permissions_perm', - // Composite indexes for optimized permission checks - 'idx_user_roles_composite', - 'idx_user_groups_composite', - 'idx_group_roles_composite', - 'idx_role_permissions_composite', - // Permission lookups 'idx_permissions_resource_action', @@ -79,26 +65,22 @@ describe('Database Index Verification', () => { }); it('should have composite indexes on junction tables', async () => { - const compositeIndexes = await new Promise((resolve, reject) => { - db.all( - "SELECT name, tbl_name, sql FROM sqlite_master WHERE type='index' AND name LIKE '%composite%'", - (err, rows) => { - 
if (err) reject(err); - else resolve(rows); - } - ); - }); + // Verify that key junction table indexes exist (single-column indexes on junction tables) + const junctionIndexes = await db.query( + "SELECT name, tbl_name, sql FROM sqlite_master WHERE type='index' AND name LIKE 'idx_%' AND (tbl_name='user_roles' OR tbl_name='user_groups' OR tbl_name='group_roles' OR tbl_name='role_permissions')" + ); - expect(compositeIndexes.length).toBeGreaterThanOrEqual(4); + // Should have at least 2 indexes per junction table (one per column) + expect(junctionIndexes.length).toBeGreaterThanOrEqual(4); - const indexInfo = compositeIndexes.map((idx) => ({ + const indexInfo = junctionIndexes.map((idx) => ({ name: idx.name, table: idx.tbl_name, })); - console.log('Composite indexes:', indexInfo); + console.log('Junction table indexes:', indexInfo); - // Verify composite indexes are on the correct tables + // Verify indexes exist on the correct tables expect(indexInfo.some((idx) => idx.table === 'user_roles')).toBe(true); expect(indexInfo.some((idx) => idx.table === 'user_groups')).toBe(true); expect(indexInfo.some((idx) => idx.table === 'group_roles')).toBe(true); @@ -106,24 +88,16 @@ describe('Database Index Verification', () => { }); it('should have WAL mode enabled', async () => { - const journalMode = await new Promise((resolve, reject) => { - db.get('PRAGMA journal_mode;', (err, row: any) => { - if (err) reject(err); - else resolve(row.journal_mode); - }); - }); + const row = await db.queryOne('PRAGMA journal_mode'); + const journalMode = row?.journal_mode ?? 
''; expect(journalMode.toLowerCase()).toBe('wal'); console.log('✓ WAL mode is enabled'); }); it('should have foreign keys enabled', async () => { - const foreignKeys = await new Promise((resolve, reject) => { - db.get('PRAGMA foreign_keys;', (err, row: any) => { - if (err) reject(err); - else resolve(row.foreign_keys); - }); - }); + const row = await db.queryOne('PRAGMA foreign_keys'); + const foreignKeys = row?.foreign_keys ?? 0; expect(foreignKeys).toBe(1); console.log('✓ Foreign keys are enabled'); @@ -131,24 +105,9 @@ describe('Database Index Verification', () => { it('should have performance pragmas configured', async () => { const pragmas = await Promise.all([ - new Promise((resolve, reject) => { - db.get('PRAGMA synchronous;', (err, row) => { - if (err) reject(err); - else resolve(row); - }); - }), - new Promise((resolve, reject) => { - db.get('PRAGMA cache_size;', (err, row) => { - if (err) reject(err); - else resolve(row); - }); - }), - new Promise((resolve, reject) => { - db.get('PRAGMA temp_store;', (err, row) => { - if (err) reject(err); - else resolve(row); - }); - }), + db.queryOne('PRAGMA synchronous;'), + db.queryOne('PRAGMA cache_size;'), + db.queryOne('PRAGMA temp_store;'), ]); console.log('Performance pragmas:', { diff --git a/backend/test/database/migration-integration.test.ts b/backend/test/database/migration-integration.test.ts index 9962e6ed..053c5ba0 100644 --- a/backend/test/database/migration-integration.test.ts +++ b/backend/test/database/migration-integration.test.ts @@ -28,14 +28,18 @@ describe('Migration Integration Test', () => { it('should apply all migrations on initialization', async () => { const status = await dbService.getMigrationStatus(); - // Should have applied all migrations - expect(status.applied).toHaveLength(6); - expect(status.applied[0].id).toBe('001'); - expect(status.applied[1].id).toBe('002'); - expect(status.applied[2].id).toBe('003'); - expect(status.applied[3].id).toBe('004'); - 
expect(status.applied[4].id).toBe('005'); - expect(status.applied[5].id).toBe('006'); + // Should have applied all migrations (000 through 009) + expect(status.applied).toHaveLength(10); + expect(status.applied[0].id).toBe('000'); + expect(status.applied[1].id).toBe('001'); + expect(status.applied[2].id).toBe('002'); + expect(status.applied[3].id).toBe('003'); + expect(status.applied[4].id).toBe('004'); + expect(status.applied[5].id).toBe('005'); + expect(status.applied[6].id).toBe('006'); + expect(status.applied[7].id).toBe('007'); + expect(status.applied[8].id).toBe('008'); + expect(status.applied[9].id).toBe('009'); expect(status.pending).toHaveLength(0); }); @@ -43,23 +47,13 @@ describe('Migration Integration Test', () => { const db = dbService.getConnection(); // Check that roles exist - const roles = await new Promise((resolve, reject) => { - db.all('SELECT * FROM roles WHERE isBuiltIn = 1', (err, rows) => { - if (err) reject(err); - else resolve(rows); - }); - }); + const roles = await db.query('SELECT * FROM roles WHERE isBuiltIn = 1'); - expect(roles).toHaveLength(3); - expect(roles.map(r => r.name).sort()).toEqual(['Administrator', 'Operator', 'Viewer']); + expect(roles).toHaveLength(4); + expect(roles.map(r => r.name).sort()).toEqual(['Administrator', 'Operator', 'Provisioner', 'Viewer']); // Check that config table exists and has default values - const config = await new Promise((resolve, reject) => { - db.all('SELECT * FROM config', (err, rows) => { - if (err) reject(err); - else resolve(rows); - }); - }); + const config = await db.query('SELECT * FROM config'); expect(config.length).toBeGreaterThan(0); const configMap = Object.fromEntries(config.map((c: any) => [c.key, c.value])); @@ -71,12 +65,8 @@ describe('Migration Integration Test', () => { const db = dbService.getConnection(); // Check that no admin users exist - const adminCount = await new Promise((resolve, reject) => { - db.get('SELECT COUNT(*) as count FROM users WHERE isAdmin = 1', (err, 
row: any) => { - if (err) reject(err); - else resolve(row.count); - }); - }); + const row = await db.queryOne('SELECT COUNT(*) as count FROM users WHERE isAdmin = 1'); + const adminCount = row?.count ?? 0; expect(adminCount).toBe(0); }); @@ -90,8 +80,8 @@ describe('Migration Integration Test', () => { const status = await dbService2.getMigrationStatus(); - // Should still have 6 applied, 0 pending - expect(status.applied).toHaveLength(6); + // Should still have 10 applied, 0 pending + expect(status.applied).toHaveLength(10); expect(status.pending).toHaveLength(0); await dbService2.close(); diff --git a/backend/test/database/rbac-schema.test.ts b/backend/test/database/rbac-schema.test.ts index 6a04555e..a860fa7e 100644 --- a/backend/test/database/rbac-schema.test.ts +++ b/backend/test/database/rbac-schema.test.ts @@ -26,15 +26,7 @@ describe('RBAC Database Schema', () => { it('should create users table with correct schema', async () => { const db = dbService.getConnection(); - const result = await new Promise((resolve, reject) => { - db.get( - "SELECT sql FROM sqlite_master WHERE type='table' AND name='users'", - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); + const result = await db.queryOne("SELECT sql FROM sqlite_master WHERE type='table' AND name='users'"); expect(result).toBeDefined(); expect(result.sql).toContain('id TEXT PRIMARY KEY'); @@ -48,15 +40,7 @@ describe('RBAC Database Schema', () => { it('should create groups table', async () => { const db = dbService.getConnection(); - const result = await new Promise((resolve, reject) => { - db.get( - "SELECT sql FROM sqlite_master WHERE type='table' AND name='groups'", - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); + const result = await db.queryOne("SELECT sql FROM sqlite_master WHERE type='table' AND name='groups'"); expect(result).toBeDefined(); expect(result.sql).toContain('id TEXT PRIMARY KEY'); @@ -66,15 +50,7 @@ describe('RBAC Database Schema', () 
=> { it('should create roles table', async () => { const db = dbService.getConnection(); - const result = await new Promise((resolve, reject) => { - db.get( - "SELECT sql FROM sqlite_master WHERE type='table' AND name='roles'", - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); + const result = await db.queryOne("SELECT sql FROM sqlite_master WHERE type='table' AND name='roles'"); expect(result).toBeDefined(); expect(result.sql).toContain('id TEXT PRIMARY KEY'); @@ -85,15 +61,7 @@ describe('RBAC Database Schema', () => { it('should create permissions table with unique constraint', async () => { const db = dbService.getConnection(); - const result = await new Promise((resolve, reject) => { - db.get( - "SELECT sql FROM sqlite_master WHERE type='table' AND name='permissions'", - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); + const result = await db.queryOne("SELECT sql FROM sqlite_master WHERE type='table' AND name='permissions'"); expect(result).toBeDefined(); expect(result.sql).toContain('id TEXT PRIMARY KEY'); @@ -108,15 +76,7 @@ describe('RBAC Database Schema', () => { const tables = ['user_groups', 'user_roles', 'group_roles', 'role_permissions']; for (const tableName of tables) { - const result = await new Promise((resolve, reject) => { - db.get( - `SELECT sql FROM sqlite_master WHERE type='table' AND name='${tableName}'`, - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); + const result = await db.queryOne(`SELECT sql FROM sqlite_master WHERE type='table' AND name='${tableName}'`); expect(result).toBeDefined(); expect(result.sql).toContain('PRIMARY KEY'); @@ -127,15 +87,7 @@ describe('RBAC Database Schema', () => { it('should create revoked_tokens table', async () => { const db = dbService.getConnection(); - const result = await new Promise((resolve, reject) => { - db.get( - "SELECT sql FROM sqlite_master WHERE type='table' AND name='revoked_tokens'", - (err, row) => { - if 
(err) reject(err); - else resolve(row); - } - ); - }); + const result = await db.queryOne("SELECT sql FROM sqlite_master WHERE type='table' AND name='revoked_tokens'"); expect(result).toBeDefined(); expect(result.sql).toContain('token TEXT PRIMARY KEY'); @@ -166,15 +118,7 @@ describe('RBAC Database Schema', () => { ]; for (const indexName of expectedIndexes) { - const result = await new Promise((resolve, reject) => { - db.get( - `SELECT name FROM sqlite_master WHERE type='index' AND name='${indexName}'`, - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); + const result = await db.queryOne(`SELECT name FROM sqlite_master WHERE type='index' AND name='${indexName}'`); expect(result).toBeDefined(); expect(result.name).toBe(indexName); @@ -185,29 +129,13 @@ describe('RBAC Database Schema', () => { const db = dbService.getConnection(); // Insert first user - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, createdAt, updatedAt) - VALUES ('user1', 'testuser', 'test@example.com', 'hash123', 'Test', 'User', '2024-01-01T00:00:00Z', '2024-01-01T00:00:00Z')`, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute(`INSERT INTO users (id, username, email, passwordHash, firstName, lastName, createdAt, updatedAt) + VALUES ('user1', 'testuser', 'test@example.com', 'hash123', 'Test', 'User', '2024-01-01T00:00:00Z', '2024-01-01T00:00:00Z')`); // Try to insert duplicate username await expect( - new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, createdAt, updatedAt) - VALUES ('user2', 'testuser', 'other@example.com', 'hash456', 'Other', 'User', '2024-01-01T00:00:00Z', '2024-01-01T00:00:00Z')`, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }) + db.execute(`INSERT INTO users (id, username, email, passwordHash, firstName, lastName, createdAt, updatedAt) + 
VALUES ('user2', 'testuser', 'other@example.com', 'hash456', 'Other', 'User', '2024-01-01T00:00:00Z', '2024-01-01T00:00:00Z')`) ).rejects.toThrow(); }); @@ -215,29 +143,13 @@ describe('RBAC Database Schema', () => { const db = dbService.getConnection(); // Insert first permission (use unique test values to avoid conflicts with seed data) - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO permissions (id, resource, "action", description, createdAt) - VALUES ('perm-test-1', 'test-resource', 'test-read', 'Test permission', '2024-01-01T00:00:00Z')`, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute(`INSERT INTO permissions (id, resource, "action", description, createdAt) + VALUES ('perm-test-1', 'test-resource', 'test-read', 'Test permission', '2024-01-01T00:00:00Z')`); // Try to insert duplicate resource-action await expect( - new Promise((resolve, reject) => { - db.run( - `INSERT INTO permissions (id, resource, "action", description, createdAt) - VALUES ('perm-test-2', 'test-resource', 'test-read', 'Another test permission', '2024-01-01T00:00:00Z')`, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }) + db.execute(`INSERT INTO permissions (id, resource, "action", description, createdAt) + VALUES ('perm-test-2', 'test-resource', 'test-read', 'Another test permission', '2024-01-01T00:00:00Z')`) ).rejects.toThrow(); }); }); diff --git a/backend/test/integration/auth-flow.test.ts b/backend/test/integration/auth-flow.test.ts index 2f1f0450..77b22d99 100644 --- a/backend/test/integration/auth-flow.test.ts +++ b/backend/test/integration/auth-flow.test.ts @@ -655,16 +655,10 @@ describe('Authentication Flow Integration Tests', () => { .expect(200); // Deactivate user - await new Promise((resolve, reject) => { - databaseService.getConnection().run( - 'UPDATE users SET isActive = 0 WHERE id = ?', - [userId], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await 
databaseService.getConnection().execute( + 'UPDATE users SET isActive = 0 WHERE id = ?', + [userId] + ); // Try to login with inactive account await request(app) diff --git a/backend/test/integration/batch-execution.test.ts b/backend/test/integration/batch-execution.test.ts index 584ef758..cbfab1d9 100644 --- a/backend/test/integration/batch-execution.test.ts +++ b/backend/test/integration/batch-execution.test.ts @@ -8,7 +8,8 @@ import { } from "vitest"; import express, { type Express } from "express"; import request from "supertest"; -import sqlite3 from "sqlite3"; +import { SQLiteAdapter } from "../../src/database/SQLiteAdapter"; +import type { DatabaseAdapter } from "../../src/database/DatabaseAdapter"; import { ExecutionRepository } from "../../src/database/ExecutionRepository"; import { createExecutionsRouter } from "../../src/routes/executions"; import { errorHandler, requestIdMiddleware } from "../../src/middleware/errorHandler"; @@ -547,35 +548,26 @@ describe("Batch Execution API Endpoints", () => { */ describe("Batch Execution End-to-End Flow", () => { let app: Express; - let db: sqlite3.Database; + let db: DatabaseAdapter; let executionRepository: ExecutionRepository; let batchExecutionService: RealBatchExecutionService; let mockExecutionQueue: ExecutionQueue; let mockIntegrationManager: IntegrationManager; // Helper to run database queries - const runQuery = (sql: string, params: any[] = []): Promise => { - return new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + const runQuery = async (sql: string, params: any[] = []): Promise => { + await db.execute(sql, params); }; // Helper to get database rows - const getRows = (sql: string, params: any[] = []): Promise => { - return new Promise((resolve, reject) => { - db.all(sql, params, (err, rows) => { - if (err) reject(err); - else resolve(rows); - }); - }); + const getRows = async (sql: string, params: any[] = []): Promise => { + return 
db.query(sql, params); }; beforeEach(async () => { // Create in-memory database - db = new sqlite3.Database(":memory:"); + db = new SQLiteAdapter(":memory:"); + await db.initialize(); // Create executions table await runQuery(` @@ -704,9 +696,7 @@ describe("Batch Execution End-to-End Flow", () => { afterEach(async () => { // Close database - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); vi.restoreAllMocks(); }); diff --git a/backend/test/integration/integration-colors.test.ts b/backend/test/integration/integration-colors.test.ts index 53a1fd1b..c802c377 100644 --- a/backend/test/integration/integration-colors.test.ts +++ b/backend/test/integration/integration-colors.test.ts @@ -34,7 +34,7 @@ describe('Integration Colors API', () => { // Verify all five integrations are present const { colors, integrations } = response.body; - expect(integrations).toEqual(['bolt', 'ansible', 'puppetdb', 'puppetserver', 'hiera', 'ssh']); + expect(integrations).toEqual(['proxmox', 'aws', 'bolt', 'ansible', 'ssh', 'puppetdb', 'puppetserver', 'hiera']); // Verify each integration has color configuration for (const integration of integrations) { diff --git a/backend/test/integration/integration-status.test.ts b/backend/test/integration/integration-status.test.ts index 6f815162..fd1a00f0 100644 --- a/backend/test/integration/integration-status.test.ts +++ b/backend/test/integration/integration-status.test.ts @@ -149,8 +149,8 @@ describe("Integration Status API", () => { expect(response.body).toHaveProperty("integrations"); expect(response.body).toHaveProperty("timestamp"); expect(Array.isArray(response.body.integrations)).toBe(true); - // Configured plugins + unconfigured Puppetserver and Hiera - expect(response.body.integrations).toHaveLength(4); + // Configured plugins + unconfigured Puppetserver, Hiera, Proxmox, and AWS + expect(response.body.integrations).toHaveLength(6); // Check first integration const puppetdb = 
response.body.integrations.find( @@ -246,8 +246,8 @@ describe("Integration Status API", () => { .get("/api/integrations/status") .expect(200); - // Should have unconfigured puppetdb, puppetserver, bolt, and hiera entries - expect(response.body.integrations).toHaveLength(4); + // Should have unconfigured puppetdb, puppetserver, bolt, hiera, proxmox, and aws entries + expect(response.body.integrations).toHaveLength(6); expect(response.body.timestamp).toBeDefined(); const puppetdb = response.body.integrations.find( @@ -284,8 +284,8 @@ describe("Integration Status API", () => { .expect(200); expect(response.body.cached).toBe(true); - // Configured plugins + unconfigured Puppetserver and Hiera - expect(response.body.integrations).toHaveLength(4); + // Configured plugins + unconfigured Puppetserver, Hiera, Proxmox, and AWS + expect(response.body.integrations).toHaveLength(6); }); it("should refresh health checks when requested", async () => { @@ -294,8 +294,8 @@ describe("Integration Status API", () => { .expect(200); expect(response.body.cached).toBe(false); - // Configured plugins + unconfigured Puppetserver and Hiera - expect(response.body.integrations).toHaveLength(4); + // Configured plugins + unconfigured Puppetserver, Hiera, Proxmox, and AWS + expect(response.body.integrations).toHaveLength(6); }); }); }); diff --git a/backend/test/integration/permission-inheritance.test.ts b/backend/test/integration/permission-inheritance.test.ts index b3d8a1f4..42aa2a6f 100644 --- a/backend/test/integration/permission-inheritance.test.ts +++ b/backend/test/integration/permission-inheritance.test.ts @@ -1,5 +1,6 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../../src/database/DatabaseAdapter'; import { v4 as uuidv4 } from 'uuid'; import { PermissionService } from '../../src/services/PermissionService'; import { 
UserService } from '../../src/services/UserService'; @@ -19,7 +20,7 @@ import { AuthenticationService } from '../../src/services/AuthenticationService' * Validates Requirements: 8.1, 8.2, 8.3, 8.5 */ describe('Permission Inheritance Integration Tests', () => { - let db: Database; + let db: DatabaseAdapter; let permissionService: PermissionService; let userService: UserService; let groupService: GroupService; @@ -31,7 +32,8 @@ describe('Permission Inheritance Integration Tests', () => { process.env.JWT_SECRET = 'test-secret-key-for-permission-inheritance-tests'; // pragma: allowlist secret // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -767,120 +769,31 @@ describe('Permission Inheritance Integration Tests', () => { }); // Helper functions -async function initializeSchema(db: Database): Promise { - return new Promise((resolve, reject) => { - db.exec(` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT UNIQUE NOT NULL, - email TEXT UNIQUE NOT NULL, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER DEFAULT 1, - isAdmin INTEGER DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - description TEXT, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - description TEXT, - isBuiltIn INTEGER DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE permissions ( - id TEXT PRIMARY KEY, - resource TEXT NOT NULL, - action TEXT NOT NULL, - description TEXT, - createdAt TEXT NOT NULL, - UNIQUE(resource, action) - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, groupId), - FOREIGN 
KEY (userId) REFERENCES users(id), - FOREIGN KEY (groupId) REFERENCES groups(id) - ); - - CREATE TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id), - FOREIGN KEY (roleId) REFERENCES roles(id) - ); - - CREATE TABLE group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id), - FOREIGN KEY (roleId) REFERENCES roles(id) - ); - - CREATE TABLE role_permissions ( - roleId TEXT NOT NULL, - permissionId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (roleId, permissionId), - FOREIGN KEY (roleId) REFERENCES roles(id), - FOREIGN KEY (permissionId) REFERENCES permissions(id) - ); - - -- Indexes for performance - CREATE INDEX idx_users_username ON users(username); - CREATE INDEX idx_users_email ON users(email); - CREATE INDEX idx_users_active ON users(isActive); - CREATE INDEX idx_user_roles_user ON user_roles(userId); - CREATE INDEX idx_user_roles_role ON user_roles(roleId); - CREATE INDEX idx_group_roles_group ON group_roles(groupId); - CREATE INDEX idx_group_roles_role ON group_roles(roleId); - CREATE INDEX idx_user_groups_user ON user_groups(userId); - CREATE INDEX idx_user_groups_group ON user_groups(groupId); - CREATE INDEX idx_role_permissions_role ON role_permissions(roleId); - CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId); - CREATE INDEX idx_permissions_resource_action ON permissions(resource, action); - - CREATE TABLE config ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - INSERT INTO config (key, value, updatedAt) VALUES - ('allow_self_registration', 'false', datetime('now')), - ('default_new_user_role', 'role-viewer-001', datetime('now')); - `, (err) => { - if (err) reject(err); - else resolve(); - }); - }); +async function initializeSchema(db: DatabaseAdapter): 
Promise { + await db.execute(`CREATE TABLE users ( id TEXT PRIMARY KEY, username TEXT UNIQUE NOT NULL, email TEXT UNIQUE NOT NULL, passwordHash TEXT NOT NULL, firstName TEXT NOT NULL, lastName TEXT NOT NULL, isActive INTEGER DEFAULT 1, isAdmin INTEGER DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL, lastLoginAt TEXT )`); + await db.execute(`CREATE TABLE groups ( id TEXT PRIMARY KEY, name TEXT UNIQUE NOT NULL, description TEXT, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT UNIQUE NOT NULL, description TEXT, isBuiltIn INTEGER DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`CREATE TABLE permissions ( id TEXT PRIMARY KEY, resource TEXT NOT NULL, action TEXT NOT NULL, description TEXT, createdAt TEXT NOT NULL, UNIQUE(resource, action) )`); + await db.execute(`CREATE TABLE user_groups ( userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, groupId), FOREIGN KEY (userId) REFERENCES users(id), FOREIGN KEY (groupId) REFERENCES groups(id) )`); + await db.execute(`CREATE TABLE user_roles ( userId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, roleId), FOREIGN KEY (userId) REFERENCES users(id), FOREIGN KEY (roleId) REFERENCES roles(id) )`); + await db.execute(`CREATE TABLE group_roles ( groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (groupId, roleId), FOREIGN KEY (groupId) REFERENCES groups(id), FOREIGN KEY (roleId) REFERENCES roles(id) )`); + await db.execute(`CREATE TABLE role_permissions ( roleId TEXT NOT NULL, permissionId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (roleId, permissionId), FOREIGN KEY (roleId) REFERENCES roles(id), FOREIGN KEY (permissionId) REFERENCES permissions(id) )`); + await db.execute(`CREATE INDEX idx_users_username ON users(username)`); + await db.execute(`CREATE INDEX idx_users_email ON users(email)`); 
+ await db.execute(`CREATE INDEX idx_users_active ON users(isActive)`); + await db.execute(`CREATE INDEX idx_user_roles_user ON user_roles(userId)`); + await db.execute(`CREATE INDEX idx_user_roles_role ON user_roles(roleId)`); + await db.execute(`CREATE INDEX idx_group_roles_group ON group_roles(groupId)`); + await db.execute(`CREATE INDEX idx_group_roles_role ON group_roles(roleId)`); + await db.execute(`CREATE INDEX idx_user_groups_user ON user_groups(userId)`); + await db.execute(`CREATE INDEX idx_user_groups_group ON user_groups(groupId)`); + await db.execute(`CREATE INDEX idx_role_permissions_role ON role_permissions(roleId)`); + await db.execute(`CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId)`); + await db.execute(`CREATE INDEX idx_permissions_resource_action ON permissions(resource, action)`); + await db.execute(`CREATE TABLE config ( key TEXT PRIMARY KEY, value TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`INSERT INTO config (key, value, updatedAt) VALUES ('allow_self_registration', 'false', datetime('now')), ('default_new_user_role', '', datetime('now'))`); } -async function closeDatabase(db: Database): Promise { - return new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); +async function closeDatabase(db: DatabaseAdapter): Promise { + await db.close(); } diff --git a/backend/test/integrationConfig.routes.test.ts b/backend/test/integrationConfig.routes.test.ts new file mode 100644 index 00000000..68a358fb --- /dev/null +++ b/backend/test/integrationConfig.routes.test.ts @@ -0,0 +1,265 @@ +import { describe, it, expect, beforeEach, vi } from "vitest"; +import express, { type Express } from "express"; +import request from "supertest"; + +// Mock IntegrationConfigService +const mockSaveConfig = vi.fn(); +const mockGetEffectiveConfig = vi.fn(); +const mockDeleteConfig = vi.fn(); +const mockListConfigs = vi.fn(); + 
+vi.mock("../src/services/IntegrationConfigService", () => ({ + IntegrationConfigService: class { + saveConfig = mockSaveConfig; + getEffectiveConfig = mockGetEffectiveConfig; + deleteConfig = mockDeleteConfig; + listConfigs = mockListConfigs; + }, +})); + +// Mock auth middleware — pass through and set req.user +vi.mock("../src/middleware/authMiddleware", () => ({ + createAuthMiddleware: () => { + return (req: any, _res: any, next: any) => { + // If test sets no-auth header, skip setting user + if (req.headers["x-test-no-auth"] === "true") { + return next(); + } + req.user = { + userId: "user-123", + username: "testuser", + roles: ["Administrator"], + iat: Math.floor(Date.now() / 1000), + exp: Math.floor(Date.now() / 1000) + 3600, + }; + next(); + }; + }, +})); + +// Mock RBAC middleware — check for x-test-forbidden header +vi.mock("../src/middleware/rbacMiddleware", () => ({ + createRbacMiddleware: () => { + return (resource: string, action: string) => { + return (req: any, res: any, next: any) => { + if (req.headers["x-test-forbidden"] === "true") { + return res.status(403).json({ + error: { + code: "INSUFFICIENT_PERMISSIONS", + message: "Insufficient permissions", + required: { resource, action }, + }, + }); + } + next(); + }; + }; + }, +})); + +// Mock LoggerService +vi.mock("../src/services/LoggerService", () => ({ + LoggerService: class { + info = vi.fn(); + warn = vi.fn(); + error = vi.fn(); + debug = vi.fn(); + }, +})); + +import { createIntegrationConfigRouter } from "../src/routes/integrationConfig"; + +describe("Integration Config Routes", () => { + let app: Express; + + const fakeDatabaseService = { + getConnection: () => ({} as any), + } as any; + + beforeEach(() => { + vi.clearAllMocks(); + app = express(); + app.use(express.json()); + app.use("/api/config/integrations", createIntegrationConfigRouter(fakeDatabaseService)); + }); + + // ---- Authentication tests ---- + + describe("Authentication", () => { + it("GET / returns 401 when user is not 
authenticated", async () => { + const res = await request(app) + .get("/api/config/integrations") + .set("x-test-no-auth", "true") + .expect(401); + + expect(res.body.error.code).toBe("UNAUTHORIZED"); + }); + + it("PUT /:name returns 401 when user is not authenticated", async () => { + const res = await request(app) + .put("/api/config/integrations/proxmox") + .set("x-test-no-auth", "true") + .send({ config: { host: "10.0.0.1" } }) + .expect(401); + + expect(res.body.error.code).toBe("UNAUTHORIZED"); + }); + + it("DELETE /:name returns 401 when user is not authenticated", async () => { + const res = await request(app) + .delete("/api/config/integrations/proxmox") + .set("x-test-no-auth", "true") + .expect(401); + + expect(res.body.error.code).toBe("UNAUTHORIZED"); + }); + }); + + // ---- Authorization tests ---- + + describe("Authorization", () => { + it("GET / returns 403 when user lacks integration_config:read", async () => { + const res = await request(app) + .get("/api/config/integrations") + .set("x-test-forbidden", "true") + .expect(403); + + expect(res.body.error.code).toBe("INSUFFICIENT_PERMISSIONS"); + }); + + it("PUT /:name returns 403 when user lacks integration_config:configure", async () => { + const res = await request(app) + .put("/api/config/integrations/proxmox") + .set("x-test-forbidden", "true") + .send({ config: { host: "10.0.0.1" } }) + .expect(403); + + expect(res.body.error.code).toBe("INSUFFICIENT_PERMISSIONS"); + }); + + it("DELETE /:name returns 403 when user lacks integration_config:configure", async () => { + const res = await request(app) + .delete("/api/config/integrations/proxmox") + .set("x-test-forbidden", "true") + .expect(403); + + expect(res.body.error.code).toBe("INSUFFICIENT_PERMISSIONS"); + }); + }); + + // ---- Validation tests ---- + + describe("Validation", () => { + it("PUT /:name returns 400 when body is missing config", async () => { + const res = await request(app) + .put("/api/config/integrations/proxmox") + .send({}) 
+ .expect(400); + + expect(res.body.error.code).toBe("VALIDATION_ERROR"); + }); + + it("PUT /:name returns 400 when config is not an object", async () => { + const res = await request(app) + .put("/api/config/integrations/proxmox") + .send({ config: "not-an-object" }) + .expect(400); + + expect(res.body.error.code).toBe("VALIDATION_ERROR"); + }); + }); + + // ---- Happy path tests ---- + + describe("GET /api/config/integrations", () => { + it("returns list of configs for the authenticated user", async () => { + const mockConfigs = [ + { id: "cfg-1", userId: "user-123", integrationName: "proxmox", config: { host: "10.0.0.1" }, isActive: true, createdAt: "2024-01-01T00:00:00Z", updatedAt: "2024-01-01T00:00:00Z" }, + ]; + mockListConfigs.mockResolvedValue(mockConfigs); + + const res = await request(app) + .get("/api/config/integrations") + .expect(200); + + expect(res.body.configs).toEqual(mockConfigs); + expect(mockListConfigs).toHaveBeenCalledWith("user-123"); + }); + }); + + describe("GET /api/config/integrations/:name", () => { + it("returns effective config for the integration", async () => { + const effectiveConfig = { host: "10.0.0.1", port: 8006 }; + mockGetEffectiveConfig.mockResolvedValue(effectiveConfig); + + const res = await request(app) + .get("/api/config/integrations/proxmox") + .expect(200); + + expect(res.body.config).toEqual(effectiveConfig); + expect(mockGetEffectiveConfig).toHaveBeenCalledWith("proxmox"); + }); + + it("returns empty object when no config exists", async () => { + mockGetEffectiveConfig.mockResolvedValue({}); + + const res = await request(app) + .get("/api/config/integrations/unknown") + .expect(200); + + expect(res.body.config).toEqual({}); + }); + }); + + describe("PUT /api/config/integrations/:name", () => { + it("saves config and returns success", async () => { + mockSaveConfig.mockResolvedValue(undefined); + + const res = await request(app) + .put("/api/config/integrations/proxmox") + .send({ config: { host: "10.0.0.1", 
api_token: "secret-value" } }) + .expect(200); + + expect(res.body.message).toBe("Config saved successfully"); + expect(mockSaveConfig).toHaveBeenCalledWith( + "user-123", + "proxmox", + { host: "10.0.0.1", api_token: "secret-value" }, + ); + }); + + it("returns 500 when service throws", async () => { + mockSaveConfig.mockRejectedValue(new Error("DB error")); + + const res = await request(app) + .put("/api/config/integrations/proxmox") + .send({ config: { host: "10.0.0.1" } }) + .expect(500); + + expect(res.body.error.code).toBe("INTERNAL_SERVER_ERROR"); + }); + }); + + describe("DELETE /api/config/integrations/:name", () => { + it("deletes config and returns success", async () => { + mockDeleteConfig.mockResolvedValue(undefined); + + const res = await request(app) + .delete("/api/config/integrations/proxmox") + .expect(200); + + expect(res.body.message).toBe("Config deleted successfully"); + expect(mockDeleteConfig).toHaveBeenCalledWith("user-123", "proxmox"); + }); + + it("returns 500 when service throws", async () => { + mockDeleteConfig.mockRejectedValue(new Error("DB error")); + + const res = await request(app) + .delete("/api/config/integrations/proxmox") + .expect(500); + + expect(res.body.error.code).toBe("INTERNAL_SERVER_ERROR"); + }); + }); +}); diff --git a/backend/test/integrations/IntegrationManager.test.ts b/backend/test/integrations/IntegrationManager.test.ts index a685f007..d82afc4d 100644 --- a/backend/test/integrations/IntegrationManager.test.ts +++ b/backend/test/integrations/IntegrationManager.test.ts @@ -812,7 +812,7 @@ describe("IntegrationManager", () => { expect(inventory.sources.bad.status).toBe("unavailable"); }); - it("should deduplicate nodes by ID", async () => { + it("should deduplicate nodes by ID and track all sources", async () => { const node: Node = { id: "node1", name: "node1", @@ -845,10 +845,14 @@ describe("IntegrationManager", () => { const inventory = await manager.getAggregatedInventory(); 
expect(inventory.nodes).toHaveLength(1); - // Should prefer node from higher priority source (source2) - expect((inventory.nodes[0] as Node & { source?: string }).source).toBe( - "source2", - ); + // Should track all sources + expect(inventory.nodes[0].sources).toEqual(expect.arrayContaining(["source1", "source2"])); + expect(inventory.nodes[0].sources).toHaveLength(2); + // Should preserve source-specific data for each source + expect(inventory.nodes[0].sourceData["source1"]).toBeDefined(); + expect(inventory.nodes[0].sourceData["source2"]).toBeDefined(); + // Should mark as linked since it exists in multiple sources + expect(inventory.nodes[0].linked).toBe(true); }); }); @@ -1728,4 +1732,220 @@ describe("IntegrationManager", () => { // by checking that the next call fetches fresh data }); }); + + describe("Provisioning Capabilities", () => { + it("should return empty array when no plugins have provisioning capabilities", () => { + const logger = new LoggerService(); + const manager = new IntegrationManager({ logger }); + + const tool = new MockExecutionTool("tool", logger); + + manager.registerPlugin(tool, { + enabled: true, + name: "tool", + type: "execution", + config: {}, + }); + + const capabilities = manager.getAllProvisioningCapabilities(); + expect(capabilities).toEqual([]); + }); + + it("should return provisioning capabilities from plugins that support them", () => { + const logger = new LoggerService(); + const manager = new IntegrationManager({ logger }); + + // Create a mock plugin with provisioning capabilities + class MockProvisioningTool extends BasePlugin implements ExecutionToolPlugin { + constructor(name: string, logger: LoggerService) { + super(name, "execution", logger); + } + + protected async performInitialization(): Promise { + // Mock initialization + } + + protected async performHealthCheck(): Promise> { + return { + healthy: true, + message: "Mock provisioning tool is healthy", + }; + } + + async executeAction(_action: Action): Promise { 
+ return { + success: true, + output: "Mock execution", + }; + } + + listCapabilities() { + return []; + } + + listProvisioningCapabilities() { + return [ + { + name: "create_vm", + description: "Create a new virtual machine", + operation: "create" as const, + parameters: [ + { name: "name", type: "string", required: true }, + { name: "memory", type: "number", required: false, default: 512 }, + ], + }, + { + name: "destroy_vm", + description: "Destroy a virtual machine", + operation: "destroy" as const, + parameters: [ + { name: "vmid", type: "number", required: true }, + ], + }, + ]; + } + } + + const tool = new MockProvisioningTool("proxmox", logger); + + manager.registerPlugin(tool, { + enabled: true, + name: "proxmox", + type: "execution", + config: {}, + }); + + const capabilities = manager.getAllProvisioningCapabilities(); + + expect(capabilities).toHaveLength(1); + expect(capabilities[0].source).toBe("proxmox"); + expect(capabilities[0].capabilities).toHaveLength(2); + expect(capabilities[0].capabilities[0].name).toBe("create_vm"); + expect(capabilities[0].capabilities[0].operation).toBe("create"); + expect(capabilities[0].capabilities[1].name).toBe("destroy_vm"); + expect(capabilities[0].capabilities[1].operation).toBe("destroy"); + }); + + it("should aggregate provisioning capabilities from multiple plugins", () => { + const logger = new LoggerService(); + const manager = new IntegrationManager({ logger }); + + // Create two mock plugins with provisioning capabilities + class MockProvisioningTool1 extends BasePlugin implements ExecutionToolPlugin { + constructor(name: string, logger: LoggerService) { + super(name, "execution", logger); + } + + protected async performInitialization(): Promise {} + protected async performHealthCheck(): Promise> { + return { healthy: true, message: "Healthy" }; + } + async executeAction(_action: Action): Promise { + return { success: true, output: "Mock" }; + } + listCapabilities() { + return []; + } + 
listProvisioningCapabilities() { + return [ + { + name: "create_vm", + description: "Create VM", + operation: "create" as const, + parameters: [], + }, + ]; + } + } + + class MockProvisioningTool2 extends BasePlugin implements ExecutionToolPlugin { + constructor(name: string, logger: LoggerService) { + super(name, "execution", logger); + } + + protected async performInitialization(): Promise {} + protected async performHealthCheck(): Promise> { + return { healthy: true, message: "Healthy" }; + } + async executeAction(_action: Action): Promise { + return { success: true, output: "Mock" }; + } + listCapabilities() { + return []; + } + listProvisioningCapabilities() { + return [ + { + name: "create_container", + description: "Create container", + operation: "create" as const, + parameters: [], + }, + ]; + } + } + + const tool1 = new MockProvisioningTool1("proxmox", logger); + const tool2 = new MockProvisioningTool2("docker", logger); + + manager.registerPlugin(tool1, { + enabled: true, + name: "proxmox", + type: "execution", + config: {}, + }); + + manager.registerPlugin(tool2, { + enabled: true, + name: "docker", + type: "execution", + config: {}, + }); + + const capabilities = manager.getAllProvisioningCapabilities(); + + expect(capabilities).toHaveLength(2); + expect(capabilities.find(c => c.source === "proxmox")).toBeDefined(); + expect(capabilities.find(c => c.source === "docker")).toBeDefined(); + }); + + it("should handle errors when getting provisioning capabilities", () => { + const logger = new LoggerService(); + const manager = new IntegrationManager({ logger }); + + // Create a mock plugin that throws an error + class MockFailingTool extends BasePlugin implements ExecutionToolPlugin { + constructor(name: string, logger: LoggerService) { + super(name, "execution", logger); + } + + protected async performInitialization(): Promise {} + protected async performHealthCheck(): Promise> { + return { healthy: true, message: "Healthy" }; + } + async 
executeAction(_action: Action): Promise { + return { success: true, output: "Mock" }; + } + listCapabilities() { + return []; + } + listProvisioningCapabilities() { + throw new Error("Failed to get provisioning capabilities"); + } + } + + const tool = new MockFailingTool("failing", logger); + + manager.registerPlugin(tool, { + enabled: true, + name: "failing", + type: "execution", + config: {}, + }); + + // Should not throw, should return empty array + const capabilities = manager.getAllProvisioningCapabilities(); + expect(capabilities).toEqual([]); + }); + }); }); diff --git a/backend/test/integrations/NodeLinkingService.test.ts b/backend/test/integrations/NodeLinkingService.test.ts index 729f1d8a..d8d0d7e0 100644 --- a/backend/test/integrations/NodeLinkingService.test.ts +++ b/backend/test/integrations/NodeLinkingService.test.ts @@ -240,6 +240,78 @@ describe("NodeLinkingService", () => { expect(linkedNodes[0].sources).toEqual(["bolt"]); expect(linkedNodes[0].linked).toBe(false); }); + + it("should store source-specific data for each source", () => { + // Test that source-specific IDs and URIs are preserved + const nodes: Node[] = [ + { + id: "debian13.test.example42.com", + name: "debian13.test.example42.com", + uri: "ssh://debian13.test.example42.com", + transport: "ssh", + config: {}, + source: "bolt", + } as Node & { source: string }, + { + id: "proxmox:minis:100", + name: "debian13.test.example42.com", + uri: "proxmox://minis/100", + transport: "ssh", + config: {}, + source: "proxmox", + metadata: { + vmid: 100, + node: "minis", + type: "qemu", + status: "running", + }, + } as Node & { source: string; metadata: Record }, + { + id: "debian13.test.example42.com", + name: "debian13.test.example42.com", + uri: "ssh://debian13.test.example42.com", + transport: "ssh", + config: {}, + source: "puppetdb", + } as Node & { source: string }, + ]; + + const linkedNodes = service.linkNodes(nodes); + + // Should have only one linked node + 
expect(linkedNodes).toHaveLength(1); + + const linkedNode = linkedNodes[0]; + + // Primary ID should be the name (common identifier) + expect(linkedNode.id).toBe("debian13.test.example42.com"); + expect(linkedNode.name).toBe("debian13.test.example42.com"); + + // Should include all sources + expect(linkedNode.sources).toContain("proxmox"); + expect(linkedNode.sources).toContain("bolt"); + expect(linkedNode.sources).toContain("puppetdb"); + expect(linkedNode.sources).toHaveLength(3); + + // Should be marked as linked + expect(linkedNode.linked).toBe(true); + + // Should have source-specific data + expect(linkedNode.sourceData).toBeDefined(); + expect(linkedNode.sourceData.proxmox).toBeDefined(); + expect(linkedNode.sourceData.proxmox.id).toBe("proxmox:minis:100"); + expect(linkedNode.sourceData.proxmox.uri).toBe("proxmox://minis/100"); + expect(linkedNode.sourceData.proxmox.metadata).toBeDefined(); + expect(linkedNode.sourceData.proxmox.metadata?.vmid).toBe(100); + + expect(linkedNode.sourceData.bolt).toBeDefined(); + expect(linkedNode.sourceData.bolt.id).toBe("debian13.test.example42.com"); + expect(linkedNode.sourceData.bolt.uri).toBe("ssh://debian13.test.example42.com"); + + expect(linkedNode.sourceData.puppetdb).toBeDefined(); + expect(linkedNode.sourceData.puppetdb.id).toBe("debian13.test.example42.com"); + expect(linkedNode.sourceData.puppetdb.uri).toBe("ssh://debian13.test.example42.com"); + }); }); describe("findMatchingNodes", () => { diff --git a/backend/test/integrations/aws/AWSPlugin.executeAction.test.ts b/backend/test/integrations/aws/AWSPlugin.executeAction.test.ts new file mode 100644 index 00000000..ed5f012b --- /dev/null +++ b/backend/test/integrations/aws/AWSPlugin.executeAction.test.ts @@ -0,0 +1,252 @@ +/** + * Tests for AWSPlugin.executeAction — provisioning and lifecycle actions + * + * Validates: Requirements 10.1-10.4, 11.1-11.4 + */ + +import { describe, it, expect, vi, beforeEach } from "vitest"; +import { AWSPlugin } from 
"../../../src/integrations/aws/AWSPlugin"; +import { AWSAuthenticationError } from "../../../src/integrations/aws/types"; +import type { Action } from "../../../src/integrations/types"; +import type { JournalService } from "../../../src/services/journal/JournalService"; + +// Shared mock methods accessible from tests +const mockProvisionInstance = vi.fn().mockResolvedValue("i-new123"); +const mockStartInstance = vi.fn().mockResolvedValue(undefined); +const mockStopInstance = vi.fn().mockResolvedValue(undefined); +const mockRebootInstance = vi.fn().mockResolvedValue(undefined); +const mockTerminateInstance = vi.fn().mockResolvedValue(undefined); + +// Mock the AWSService module — must return a proper class +vi.mock("../../../src/integrations/aws/AWSService", () => { + return { + AWSService: class MockAWSService { + provisionInstance = mockProvisionInstance; + startInstance = mockStartInstance; + stopInstance = mockStopInstance; + rebootInstance = mockRebootInstance; + terminateInstance = mockTerminateInstance; + }, + }; +}); + +function createMockJournalService(): JournalService { + return { + recordEvent: vi.fn().mockResolvedValue("journal-id-1"), + } as unknown as JournalService; +} + +async function createInitializedPlugin(journalService?: JournalService) { + const plugin = new AWSPlugin(undefined, undefined, journalService); + await plugin.initialize({ + enabled: true, + name: "aws", + type: "both", + config: { + accessKeyId: "AKIATEST", + secretAccessKey: "secret", + region: "us-east-1", + }, + }); + return plugin; +} + +describe("AWSPlugin.executeAction", () => { + beforeEach(() => { + mockProvisionInstance.mockReset().mockResolvedValue("i-new123"); + mockStartInstance.mockReset().mockResolvedValue(undefined); + mockStopInstance.mockReset().mockResolvedValue(undefined); + mockRebootInstance.mockReset().mockResolvedValue(undefined); + mockTerminateInstance.mockReset().mockResolvedValue(undefined); + }); + describe("provisioning", () => { + it("should provision 
an instance and return success with instance ID", async () => { + const plugin = await createInitializedPlugin(); + const action: Action = { + type: "task", + target: "new", + action: "provision", + parameters: { imageId: "ami-123", instanceType: "t2.micro" }, + }; + + const result = await plugin.executeAction(action); + + expect(result.status).toBe("success"); + expect(result.action).toBe("provision"); + expect(result.results[0].nodeId).toBe("i-new123"); + expect(result.results[0].output?.stdout).toContain("i-new123"); + }); + + it("should also accept create_instance as provisioning action", async () => { + const plugin = await createInitializedPlugin(); + const action: Action = { + type: "task", + target: "new", + action: "create_instance", + parameters: { imageId: "ami-456" }, + }; + + const result = await plugin.executeAction(action); + + expect(result.status).toBe("success"); + expect(result.action).toBe("create_instance"); + }); + }); + + describe("lifecycle actions", () => { + const lifecycleActions = ["start", "stop", "reboot", "terminate"] as const; + + for (const actionName of lifecycleActions) { + it(`should execute ${actionName} and return success`, async () => { + const plugin = await createInitializedPlugin(); + const action: Action = { + type: "command", + target: "aws:us-east-1:i-abc123", + action: actionName, + }; + + const result = await plugin.executeAction(action); + + expect(result.status).toBe("success"); + expect(result.action).toBe(actionName); + expect(result.targetNodes).toContain("aws:us-east-1:i-abc123"); + }); + } + }); + + describe("unsupported action", () => { + it("should return failed result for unsupported action", async () => { + const plugin = await createInitializedPlugin(); + const action: Action = { + type: "command", + target: "aws:us-east-1:i-abc123", + action: "hibernate", + }; + + const result = await plugin.executeAction(action); + + expect(result.status).toBe("failed"); + expect(result.error).toContain("Unsupported AWS 
action"); + }); + }); + + describe("journal recording", () => { + it("should record a journal entry on successful action", async () => { + const journal = createMockJournalService(); + const plugin = await createInitializedPlugin(journal); + const action: Action = { + type: "command", + target: "aws:us-east-1:i-abc123", + action: "start", + }; + + await plugin.executeAction(action); + + expect(journal.recordEvent).toHaveBeenCalledTimes(1); + const entry = (journal.recordEvent as ReturnType).mock.calls[0][0]; + expect(entry.source).toBe("aws"); + expect(entry.eventType).toBe("start"); + expect(entry.summary).toContain("succeeded"); + }); + + it("should record a journal entry on failed action", async () => { + const journal = createMockJournalService(); + const plugin = await createInitializedPlugin(journal); + const action: Action = { + type: "command", + target: "aws:us-east-1:i-abc123", + action: "unknown_action", + }; + + await plugin.executeAction(action); + + expect(journal.recordEvent).toHaveBeenCalledTimes(1); + const entry = (journal.recordEvent as ReturnType).mock.calls[0][0]; + expect(entry.summary).toContain("failed"); + }); + + it("should record journal on provision success", async () => { + const journal = createMockJournalService(); + const plugin = await createInitializedPlugin(journal); + const action: Action = { + type: "task", + target: "new", + action: "provision", + parameters: { imageId: "ami-123" }, + }; + + await plugin.executeAction(action); + + expect(journal.recordEvent).toHaveBeenCalledTimes(1); + const entry = (journal.recordEvent as ReturnType).mock.calls[0][0]; + expect(entry.eventType).toBe("provision"); + expect(entry.source).toBe("aws"); + }); + + it("should not fail if journalService is not set", async () => { + const plugin = await createInitializedPlugin(); // no journal + const action: Action = { + type: "command", + target: "aws:us-east-1:i-abc123", + action: "stop", + }; + + const result = await plugin.executeAction(action); + 
expect(result.status).toBe("success"); + }); + }); + + describe("setJournalService", () => { + it("should allow setting journal service after construction", async () => { + const plugin = await createInitializedPlugin(); + const journal = createMockJournalService(); + plugin.setJournalService(journal); + + const action: Action = { + type: "command", + target: "aws:us-east-1:i-abc123", + action: "reboot", + }; + + await plugin.executeAction(action); + + expect(journal.recordEvent).toHaveBeenCalledTimes(1); + }); + }); + + describe("authentication errors", () => { + it("should throw AWSAuthenticationError when service throws it", async () => { + mockStartInstance.mockRejectedValueOnce( + new AWSAuthenticationError("Expired credentials") + ); + + const journal = createMockJournalService(); + const plugin = await createInitializedPlugin(journal); + + const action: Action = { + type: "command", + target: "aws:us-east-1:i-abc123", + action: "start", + }; + + await expect(plugin.executeAction(action)).rejects.toThrow(AWSAuthenticationError); + // Journal should still be recorded for the failure + expect(journal.recordEvent).toHaveBeenCalledTimes(1); + }); + }); + + describe("not initialized", () => { + it("should throw if plugin is not initialized", async () => { + const plugin = new AWSPlugin(); + const action: Action = { + type: "command", + target: "aws:us-east-1:i-abc123", + action: "start", + }; + + expect(() => plugin.executeAction(action)).rejects.toThrow( + "AWS integration is not initialized" + ); + }); + }); +}); diff --git a/backend/test/middleware/authMiddleware.test.ts b/backend/test/middleware/authMiddleware.test.ts index 08e92bc4..e3118fb5 100644 --- a/backend/test/middleware/authMiddleware.test.ts +++ b/backend/test/middleware/authMiddleware.test.ts @@ -1,18 +1,20 @@ import { describe, it, expect, beforeEach, afterEach } from "vitest"; import type { Request, Response, NextFunction } from "express"; -import { Database } from "sqlite3"; +import { 
SQLiteAdapter } from "../../src/database/SQLiteAdapter"; +import type { DatabaseAdapter } from "../../src/database/DatabaseAdapter"; import { createAuthMiddleware } from "../../src/middleware/authMiddleware"; import { AuthenticationService } from "../../src/services/AuthenticationService"; describe("Authentication Middleware", () => { - let db: Database; + let db: DatabaseAdapter; let authService: AuthenticationService; let middleware: ReturnType; const jwtSecret = "test-secret-key-for-middleware-testing"; // pragma: allowlist secret beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize database schema await initializeSchema(db); @@ -20,11 +22,10 @@ describe("Authentication Middleware", () => { authService = new AuthenticationService(db, jwtSecret); // Create test user - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ + [ "test-user-id", "testuser", "test@example.com", @@ -35,13 +36,8 @@ describe("Authentication Middleware", () => { 0, new Date().toISOString(), new Date().toISOString() - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + ] + ); // Create middleware instance middleware = createAuthMiddleware(db, jwtSecret); @@ -49,9 +45,7 @@ describe("Authentication Middleware", () => { afterEach(async () => { // Close database - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); }); // Helper to create mock request/response @@ -322,84 +316,46 @@ describe("Authentication Middleware", () => { }); // Helper function to initialize database schema -async function initializeSchema(db: Database): Promise 
{ - const schema = ` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT NOT NULL UNIQUE, - email TEXT NOT NULL UNIQUE, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER NOT NULL DEFAULT 1, - isAdmin INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - isBuiltIn INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE - ); - - CREATE TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE revoked_tokens ( - token TEXT PRIMARY KEY, - userId TEXT NOT NULL, - revokedAt TEXT NOT NULL, - expiresAt TEXT NOT NULL, - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE - ); - - CREATE INDEX idx_revoked_tokens_expires ON revoked_tokens(expiresAt); - CREATE INDEX idx_user_roles_user ON user_roles(userId); - CREATE INDEX idx_user_groups_user ON user_groups(userId); - CREATE INDEX idx_group_roles_group ON 
group_roles(groupId); - `; - - return new Promise((resolve, reject) => { - db.exec(schema, (err) => { - if (err) reject(err); - else resolve(); - }); - }); +async function initializeSchema(db: DatabaseAdapter): Promise { + await db.execute(`CREATE TABLE users ( + id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, email TEXT NOT NULL UNIQUE, + passwordHash TEXT NOT NULL, firstName TEXT NOT NULL, lastName TEXT NOT NULL, + isActive INTEGER NOT NULL DEFAULT 1, isAdmin INTEGER NOT NULL DEFAULT 0, + createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL, lastLoginAt TEXT + )`); + await db.execute(`CREATE TABLE groups ( + id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, + createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL + )`); + await db.execute(`CREATE TABLE roles ( + id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, + isBuiltIn INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL + )`); + await db.execute(`CREATE TABLE user_groups ( + userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, + PRIMARY KEY (userId, groupId), + FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, + FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE + )`); + await db.execute(`CREATE TABLE user_roles ( + userId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, + PRIMARY KEY (userId, roleId), + FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, + FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE + )`); + await db.execute(`CREATE TABLE group_roles ( + groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, + PRIMARY KEY (groupId, roleId), + FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, + FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE + )`); + await db.execute(`CREATE TABLE revoked_tokens ( + token TEXT PRIMARY KEY, userId TEXT NOT NULL, revokedAt TEXT NOT NULL, + expiresAt TEXT NOT NULL, + FOREIGN KEY (userId) 
REFERENCES users(id) ON DELETE CASCADE + )`); + await db.execute(`CREATE INDEX idx_revoked_tokens_expires ON revoked_tokens(expiresAt)`); + await db.execute(`CREATE INDEX idx_user_roles_user ON user_roles(userId)`); + await db.execute(`CREATE INDEX idx_user_groups_user ON user_groups(userId)`); + await db.execute(`CREATE INDEX idx_group_roles_group ON group_roles(groupId)`); } diff --git a/backend/test/middleware/rbacMiddleware.test.ts b/backend/test/middleware/rbacMiddleware.test.ts index 91bca9fd..7c461fbf 100644 --- a/backend/test/middleware/rbacMiddleware.test.ts +++ b/backend/test/middleware/rbacMiddleware.test.ts @@ -1,13 +1,14 @@ import { describe, it, expect, beforeEach, afterEach } from "vitest"; import type { Request, Response, NextFunction } from "express"; -import { Database } from "sqlite3"; +import { SQLiteAdapter } from "../../src/database/SQLiteAdapter"; +import type { DatabaseAdapter } from "../../src/database/DatabaseAdapter"; import { createRbacMiddleware } from "../../src/middleware/rbacMiddleware"; import { PermissionService } from "../../src/services/PermissionService"; import { UserService } from "../../src/services/UserService"; import { RoleService } from "../../src/services/RoleService"; describe("RBAC Middleware", () => { - let db: Database; + let db: DatabaseAdapter; let permissionService: PermissionService; let userService: UserService; let roleService: RoleService; @@ -30,7 +31,8 @@ describe("RBAC Middleware", () => { beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize database schema await initializeSchema(db); @@ -120,9 +122,7 @@ describe("RBAC Middleware", () => { afterEach(async () => { // Close database - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); }); // Helper to create mock request/response @@ -453,139 +453,32 @@ describe("RBAC Middleware", () => { }); // Helper function to 
initialize database schema -async function initializeSchema(db: Database): Promise { - const schema = ` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT NOT NULL UNIQUE, - email TEXT NOT NULL UNIQUE, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER NOT NULL DEFAULT 1, - isAdmin INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - isBuiltIn INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE permissions ( - id TEXT PRIMARY KEY, - resource TEXT NOT NULL, - action TEXT NOT NULL, - description TEXT NOT NULL, - createdAt TEXT NOT NULL, - UNIQUE(resource, action) - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE - ); - - CREATE TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE role_permissions ( - roleId TEXT NOT NULL, - permissionId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (roleId, permissionId), - FOREIGN KEY 
(roleId) REFERENCES roles(id) ON DELETE CASCADE, - FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE - ); - - CREATE TABLE revoked_tokens ( - token TEXT PRIMARY KEY, - userId TEXT NOT NULL, - revokedAt TEXT NOT NULL, - expiresAt TEXT NOT NULL, - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE - ); - - CREATE TABLE audit_logs ( - id TEXT PRIMARY KEY, - timestamp TEXT NOT NULL, - eventType TEXT NOT NULL, - action TEXT NOT NULL, - userId TEXT, - targetUserId TEXT, - targetResourceType TEXT, - targetResourceId TEXT, - ipAddress TEXT, - userAgent TEXT, - details TEXT, - result TEXT NOT NULL, - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE SET NULL - ); - - CREATE INDEX idx_revoked_tokens_expires ON revoked_tokens(expiresAt); - CREATE INDEX idx_user_roles_user ON user_roles(userId); - CREATE INDEX idx_user_groups_user ON user_groups(userId); - CREATE INDEX idx_group_roles_group ON group_roles(groupId); - CREATE INDEX idx_role_permissions_role ON role_permissions(roleId); - CREATE INDEX idx_permissions_resource_action ON permissions(resource, action); - CREATE INDEX idx_audit_logs_user ON audit_logs(userId); - CREATE INDEX idx_audit_logs_timestamp ON audit_logs(timestamp); - - CREATE TABLE config ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - INSERT INTO config (key, value, updatedAt) VALUES - ('allow_self_registration', 'false', datetime('now')), - ('default_new_user_role', 'role-viewer-001', datetime('now')); - `; - - return new Promise((resolve, reject) => { - db.exec(schema, (err) => { - if (err) reject(err); - else resolve(); - }); - }); +async function initializeSchema(db: DatabaseAdapter): Promise { + await db.execute(`CREATE TABLE users ( id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, email TEXT NOT NULL UNIQUE, passwordHash TEXT NOT NULL, firstName TEXT NOT NULL, lastName TEXT NOT NULL, isActive INTEGER NOT NULL DEFAULT 1, isAdmin INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, 
updatedAt TEXT NOT NULL, lastLoginAt TEXT )`); + await db.execute(`CREATE TABLE groups ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, isBuiltIn INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`CREATE TABLE permissions ( id TEXT PRIMARY KEY, resource TEXT NOT NULL, action TEXT NOT NULL, description TEXT NOT NULL, createdAt TEXT NOT NULL, UNIQUE(resource, action) )`); + await db.execute(`CREATE TABLE user_groups ( userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, groupId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE )`); + await db.execute(`CREATE TABLE user_roles ( userId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, roleId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE )`); + await db.execute(`CREATE TABLE group_roles ( groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (groupId, roleId), FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE )`); + await db.execute(`CREATE TABLE role_permissions ( roleId TEXT NOT NULL, permissionId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (roleId, permissionId), FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE )`); + await db.execute(`CREATE TABLE revoked_tokens ( token TEXT PRIMARY KEY, userId TEXT NOT NULL, revokedAt TEXT NOT NULL, expiresAt TEXT NOT NULL, FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE )`); + await db.execute(`CREATE TABLE 
audit_logs ( id TEXT PRIMARY KEY, timestamp TEXT NOT NULL, eventType TEXT NOT NULL, action TEXT NOT NULL, userId TEXT, targetUserId TEXT, targetResourceType TEXT, targetResourceId TEXT, ipAddress TEXT, userAgent TEXT, details TEXT, result TEXT NOT NULL, FOREIGN KEY (userId) REFERENCES users(id) ON DELETE SET NULL )`); + await db.execute(`CREATE INDEX idx_revoked_tokens_expires ON revoked_tokens(expiresAt)`); + await db.execute(`CREATE INDEX idx_user_roles_user ON user_roles(userId)`); + await db.execute(`CREATE INDEX idx_user_groups_user ON user_groups(userId)`); + await db.execute(`CREATE INDEX idx_group_roles_group ON group_roles(groupId)`); + await db.execute(`CREATE INDEX idx_role_permissions_role ON role_permissions(roleId)`); + await db.execute(`CREATE INDEX idx_permissions_resource_action ON permissions(resource, action)`); + await db.execute(`CREATE INDEX idx_audit_logs_user ON audit_logs(userId)`); + await db.execute(`CREATE INDEX idx_audit_logs_timestamp ON audit_logs(timestamp)`); + await db.execute(`CREATE TABLE config ( key TEXT PRIMARY KEY, value TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`INSERT INTO config (key, value, updatedAt) VALUES ('allow_self_registration', 'false', datetime('now')), ('default_new_user_role', 'role-viewer-001', datetime('now'))`); } // Helper function to create a user async function createUser( - db: Database, + db: DatabaseAdapter, data: { id: string; username: string; @@ -594,26 +487,20 @@ async function createUser( isAdmin: number; } ): Promise { - return new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - data.id, - data.username, - data.email, - "$2b$10$abcdefghijklmnopqrstuv", // dummy hash - "Test", - "User", - data.isActive, - data.isAdmin, - new Date().toISOString(), - new Date().toISOString() - ], - (err) => { - if (err) reject(err); - 
else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + [ + data.id, + data.username, + data.email, + "$2b$10$abcdefghijklmnopqrstuv", // dummy hash + "Test", + "User", + data.isActive, + data.isAdmin, + new Date().toISOString(), + new Date().toISOString() + ] + ); } diff --git a/backend/test/performance/database-performance.test.ts b/backend/test/performance/database-performance.test.ts index 4afd2659..03535303 100644 --- a/backend/test/performance/database-performance.test.ts +++ b/backend/test/performance/database-performance.test.ts @@ -2,16 +2,15 @@ * Database Performance Tests * * Tests database operations with large datasets - * Extends the existing database performance test with additional scenarios * * Run with: npm test -- backend/test/performance/database-performance.test.ts */ import { describe, it, expect, beforeAll, afterAll } from 'vitest'; -import sqlite3 from 'sqlite3'; +import { SQLiteAdapter } from '../../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../../src/database/DatabaseAdapter'; import { ExecutionRepository, type ExecutionRecord } from '../../src/database/ExecutionRepository'; -import { readFileSync } from 'fs'; -import { join } from 'path'; +import { MigrationRunner } from '../../src/database/MigrationRunner'; // Performance thresholds (in milliseconds) const DB_THRESHOLDS = { @@ -24,56 +23,12 @@ const DB_THRESHOLDS = { BULK_DELETE: 500, }; -// Helper to promisify database operations -function runAsync(db: sqlite3.Database, sql: string, params?: any[]): Promise { - return new Promise((resolve, reject) => { - db.run(sql, params || [], (err) => { - if (err) reject(err); - else resolve(); - }); - }); -} - -function allAsync(db: sqlite3.Database, sql: string, params?: any[]): Promise { - return new Promise((resolve, reject) => { - db.all(sql, params || [], (err, 
rows) => { - if (err) reject(err); - else resolve(rows); - }); - }); -} - -async function setupDatabase(): Promise { - const db = new sqlite3.Database(':memory:'); - - // Load and execute schema - const schemaPath = join(__dirname, '../../src/database/schema.sql'); - const schema = readFileSync(schemaPath, 'utf-8'); - - // Execute full schema using db.exec (handles comments and multiple statements) - await new Promise((resolve, reject) => { - db.exec(schema, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - // Add columns from migration 006 (batch execution support) - await runAsync(db, 'ALTER TABLE executions ADD COLUMN batch_id TEXT'); - await runAsync(db, 'ALTER TABLE executions ADD COLUMN batch_position INTEGER'); - - return db; -} - async function generateTestData( repo: ExecutionRepository, count: number ): Promise { const statuses: Array<'running' | 'success' | 'failed' | 'partial'> = [ - 'running', - 'success', - 'failed', - 'partial', + 'running', 'success', 'failed', 'partial', ]; const types: Array<'command' | 'task' | 'facts'> = ['command', 'task', 'facts']; const nodes = Array.from({ length: 100 }, (_, i) => `node${i}.example.com`); @@ -111,16 +66,19 @@ async function measureTime(fn: () => Promise): Promise<{ result: T; durati } describe('Database Performance Tests', () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let repo: ExecutionRepository; beforeAll(async () => { - db = await setupDatabase(); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); + const migrationRunner = new MigrationRunner(db); + await migrationRunner.runPendingMigrations(); repo = new ExecutionRepository(db); }); - afterAll(() => { - db.close(); + afterAll(async () => { + await db.close(); }); describe('Insert Performance', () => { @@ -130,7 +88,6 @@ describe('Database Performance Tests', () => { }); console.log(` ✓ Inserted 100 records in ${duration}ms (threshold: ${DB_THRESHOLDS.INSERT_100_RECORDS}ms)`); - console.log(` Average: 
${(duration / 100).toFixed(2)}ms per record`); expect(duration).toBeLessThan(DB_THRESHOLDS.INSERT_100_RECORDS); expect(result.length).toBe(100); }); @@ -141,7 +98,6 @@ describe('Database Performance Tests', () => { }); console.log(` ✓ Inserted 1000 records in ${duration}ms (threshold: ${DB_THRESHOLDS.INSERT_1000_RECORDS}ms)`); - console.log(` Average: ${(duration / 1000).toFixed(2)}ms per record`); expect(duration).toBeLessThan(DB_THRESHOLDS.INSERT_1000_RECORDS); expect(result.length).toBe(1000); }); @@ -149,12 +105,11 @@ describe('Database Performance Tests', () => { describe('Query Performance with Indexes', () => { beforeAll(async () => { - // Ensure we have enough data await generateTestData(repo, 500); }); it('should query by status using index efficiently', async () => { - const { result, duration } = await measureTime(async () => { + const { duration } = await measureTime(async () => { return repo.findAll({ status: 'success' }, { page: 1, pageSize: 50 }); }); @@ -163,7 +118,7 @@ describe('Database Performance Tests', () => { }); it('should query by type using index efficiently', async () => { - const { result, duration } = await measureTime(async () => { + const { duration } = await measureTime(async () => { return repo.findAll({ type: 'command' }, { page: 1, pageSize: 50 }); }); @@ -175,7 +130,7 @@ describe('Database Performance Tests', () => { const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString(); const now = new Date().toISOString(); - const { result, duration } = await measureTime(async () => { + const { duration } = await measureTime(async () => { return repo.findAll( { startDate: thirtyDaysAgo, endDate: now }, { page: 1, pageSize: 50 } @@ -203,13 +158,9 @@ describe('Database Performance Tests', () => { it('should handle complex multi-filter queries efficiently', async () => { const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString(); - const { result, duration } = await measureTime(async () => { + 
const { duration } = await measureTime(async () => { return repo.findAll( - { - status: 'success', - type: 'command', - startDate: thirtyDaysAgo, - }, + { status: 'success', type: 'command', startDate: thirtyDaysAgo }, { page: 1, pageSize: 50 } ); }); @@ -262,9 +213,6 @@ describe('Database Performance Tests', () => { }); }); - // Note: ExecutionRepository does not have a delete method - // Delete operations are not part of the current API - describe('Concurrent Operations', () => { it('should handle concurrent reads efficiently', async () => { await generateTestData(repo, 200); @@ -306,23 +254,18 @@ describe('Database Performance Tests', () => { it('should verify indexes are being used', async () => { await generateTestData(repo, 1000); - // Query with index (status) const { duration: withIndex } = await measureTime(async () => { return repo.findAll({ status: 'success' }, { page: 1, pageSize: 50 }); }); - // Query without index (targetNode - uses LIKE on JSON) const { duration: withoutIndex } = await measureTime(async () => { return repo.findAll({ targetNode: 'node1' }, { page: 1, pageSize: 50 }); }); console.log(` ✓ Query with index: ${withIndex}ms`); console.log(` ✓ Query without index: ${withoutIndex}ms`); - console.log(` Index speedup: ${(withoutIndex / withIndex).toFixed(2)}x`); - // Indexed query should be significantly faster expect(withIndex).toBeLessThan(DB_THRESHOLDS.QUERY_WITH_INDEX); - // Non-indexed query will be slower but should still be reasonable expect(withoutIndex).toBeLessThan(DB_THRESHOLDS.QUERY_WITHOUT_INDEX); }); }); @@ -331,20 +274,6 @@ describe('Database Performance Tests', () => { it('should log database performance summary', () => { console.log('\n=== Database Performance Test Summary ==='); console.log('All database performance tests passed!'); - console.log('\nOperation Thresholds:'); - console.log(` - Insert 100 records: ${DB_THRESHOLDS.INSERT_100_RECORDS}ms`); - console.log(` - Insert 1000 records: 
${DB_THRESHOLDS.INSERT_1000_RECORDS}ms`); - console.log(` - Query with index: ${DB_THRESHOLDS.QUERY_WITH_INDEX}ms`); - console.log(` - Query without index: ${DB_THRESHOLDS.QUERY_WITHOUT_INDEX}ms`); - console.log(` - Complex query: ${DB_THRESHOLDS.COMPLEX_QUERY}ms`); - console.log(` - Bulk update: ${DB_THRESHOLDS.BULK_UPDATE}ms`); - console.log(` - Bulk delete: ${DB_THRESHOLDS.BULK_DELETE}ms`); - console.log('\nRecommendations:'); - console.log(' - Indexes are working correctly for status, type, and date queries'); - console.log(' - Consider adding index for frequently queried JSON fields'); - console.log(' - Use pagination for large result sets'); - console.log(' - Monitor query performance in production'); - console.log(' - Consider archiving old execution records'); console.log('=========================================\n'); }); }); diff --git a/backend/test/performance/rbac-performance.test.ts b/backend/test/performance/rbac-performance.test.ts index 73d309b2..9f27ed57 100644 --- a/backend/test/performance/rbac-performance.test.ts +++ b/backend/test/performance/rbac-performance.test.ts @@ -14,7 +14,8 @@ */ import { describe, it, expect, beforeAll, afterAll, beforeEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../../src/database/DatabaseAdapter'; import { AuthenticationService } from '../../src/services/AuthenticationService'; import { PermissionService } from '../../src/services/PermissionService'; import { UserService } from '../../src/services/UserService'; @@ -62,7 +63,7 @@ function calculateStats(durations: number[]): { } describe('RBAC Performance Tests', () => { - let db: Database; + let db: DatabaseAdapter; let authService: AuthenticationService; let permissionService: PermissionService; let userService: UserService; @@ -77,7 +78,8 @@ describe('RBAC Performance Tests', () => { beforeAll(async () => { // Create in-memory database - db = new 
Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -495,137 +497,33 @@ describe('RBAC Performance Tests', () => { // Helper functions -async function initializeSchema(db: Database): Promise { - return new Promise((resolve, reject) => { - db.exec(` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT UNIQUE NOT NULL, - email TEXT UNIQUE NOT NULL, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER DEFAULT 1, - isAdmin INTEGER DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - description TEXT, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - description TEXT, - isBuiltIn INTEGER DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE permissions ( - id TEXT PRIMARY KEY, - resource TEXT NOT NULL, - action TEXT NOT NULL, - description TEXT, - createdAt TEXT NOT NULL, - UNIQUE(resource, action) - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id), - FOREIGN KEY (groupId) REFERENCES groups(id) - ); - - CREATE TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id), - FOREIGN KEY (roleId) REFERENCES roles(id) - ); - - CREATE TABLE group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id), - FOREIGN KEY (roleId) REFERENCES roles(id) - ); - - CREATE TABLE role_permissions ( - roleId TEXT NOT NULL, - permissionId TEXT NOT NULL, - assignedAt 
TEXT NOT NULL, - PRIMARY KEY (roleId, permissionId), - FOREIGN KEY (roleId) REFERENCES roles(id), - FOREIGN KEY (permissionId) REFERENCES permissions(id) - ); - - CREATE TABLE failed_login_attempts ( - id TEXT PRIMARY KEY, - username TEXT NOT NULL, - attemptedAt TEXT NOT NULL, - ipAddress TEXT, - reason TEXT - ); - - CREATE TABLE account_lockouts ( - id TEXT PRIMARY KEY, - username TEXT NOT NULL, - lockedAt TEXT NOT NULL, - lockoutType TEXT NOT NULL, - expiresAt TEXT, - failedAttempts INTEGER NOT NULL - ); - - -- Indexes for performance - CREATE INDEX idx_users_username ON users(username); - CREATE INDEX idx_users_email ON users(email); - CREATE INDEX idx_users_active ON users(isActive); - CREATE INDEX idx_user_roles_user ON user_roles(userId); - CREATE INDEX idx_user_roles_role ON user_roles(roleId); - CREATE INDEX idx_group_roles_group ON group_roles(groupId); - CREATE INDEX idx_group_roles_role ON group_roles(roleId); - CREATE INDEX idx_user_groups_user ON user_groups(userId); - CREATE INDEX idx_user_groups_group ON user_groups(groupId); - CREATE INDEX idx_role_permissions_role ON role_permissions(roleId); - CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId); - CREATE INDEX idx_permissions_resource_action ON permissions(resource, action); - - CREATE TABLE config ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - INSERT INTO config (key, value, updatedAt) VALUES - ('allow_self_registration', 'false', datetime('now')), - ('default_new_user_role', 'role-viewer-001', datetime('now')); - `, (err) => { - if (err) reject(err); - else resolve(); - }); - }); +async function initializeSchema(db: DatabaseAdapter): Promise { + await db.execute(`CREATE TABLE users ( id TEXT PRIMARY KEY, username TEXT UNIQUE NOT NULL, email TEXT UNIQUE NOT NULL, passwordHash TEXT NOT NULL, firstName TEXT NOT NULL, lastName TEXT NOT NULL, isActive INTEGER DEFAULT 1, isAdmin INTEGER DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT 
NULL, lastLoginAt TEXT )`); + await db.execute(`CREATE TABLE groups ( id TEXT PRIMARY KEY, name TEXT UNIQUE NOT NULL, description TEXT, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT UNIQUE NOT NULL, description TEXT, isBuiltIn INTEGER DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`CREATE TABLE permissions ( id TEXT PRIMARY KEY, resource TEXT NOT NULL, action TEXT NOT NULL, description TEXT, createdAt TEXT NOT NULL, UNIQUE(resource, action) )`); + await db.execute(`CREATE TABLE user_groups ( userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, groupId), FOREIGN KEY (userId) REFERENCES users(id), FOREIGN KEY (groupId) REFERENCES groups(id) )`); + await db.execute(`CREATE TABLE user_roles ( userId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, roleId), FOREIGN KEY (userId) REFERENCES users(id), FOREIGN KEY (roleId) REFERENCES roles(id) )`); + await db.execute(`CREATE TABLE group_roles ( groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (groupId, roleId), FOREIGN KEY (groupId) REFERENCES groups(id), FOREIGN KEY (roleId) REFERENCES roles(id) )`); + await db.execute(`CREATE TABLE role_permissions ( roleId TEXT NOT NULL, permissionId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (roleId, permissionId), FOREIGN KEY (roleId) REFERENCES roles(id), FOREIGN KEY (permissionId) REFERENCES permissions(id) )`); + await db.execute(`CREATE TABLE failed_login_attempts ( id TEXT PRIMARY KEY, username TEXT NOT NULL, attemptedAt TEXT NOT NULL, ipAddress TEXT, reason TEXT )`); + await db.execute(`CREATE TABLE account_lockouts ( id TEXT PRIMARY KEY, username TEXT NOT NULL, lockedAt TEXT NOT NULL, lockoutType TEXT NOT NULL, expiresAt TEXT, failedAttempts INTEGER NOT NULL )`); + await db.execute(`CREATE INDEX idx_users_username ON users(username)`); + await 
db.execute(`CREATE INDEX idx_users_email ON users(email)`); + await db.execute(`CREATE INDEX idx_users_active ON users(isActive)`); + await db.execute(`CREATE INDEX idx_user_roles_user ON user_roles(userId)`); + await db.execute(`CREATE INDEX idx_user_roles_role ON user_roles(roleId)`); + await db.execute(`CREATE INDEX idx_group_roles_group ON group_roles(groupId)`); + await db.execute(`CREATE INDEX idx_group_roles_role ON group_roles(roleId)`); + await db.execute(`CREATE INDEX idx_user_groups_user ON user_groups(userId)`); + await db.execute(`CREATE INDEX idx_user_groups_group ON user_groups(groupId)`); + await db.execute(`CREATE INDEX idx_role_permissions_role ON role_permissions(roleId)`); + await db.execute(`CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId)`); + await db.execute(`CREATE INDEX idx_permissions_resource_action ON permissions(resource, action)`); + await db.execute(`CREATE TABLE config ( key TEXT PRIMARY KEY, value TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`INSERT INTO config (key, value, updatedAt) VALUES ('allow_self_registration', 'false', datetime('now')), ('default_new_user_role', 'role-viewer-001', datetime('now'))`); } -async function closeDatabase(db: Database): Promise<void> { - return new Promise<void>((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); +async function closeDatabase(db: DatabaseAdapter): Promise<void> { + await db.close(); } diff --git a/backend/test/properties/auth/authentication-atomicity.property.test.ts b/backend/test/properties/auth/authentication-atomicity.property.test.ts index 363ef3e3..aaedf1c2 100644 --- a/backend/test/properties/auth/authentication-atomicity.property.test.ts +++ b/backend/test/properties/auth/authentication-atomicity.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import {
AuthenticationService } from '../../../src/services/AuthenticationService'; import { UserService } from '../../../src/services/UserService'; import * as fc from 'fast-check'; @@ -36,14 +36,15 @@ const validPasswordArbitrary = fc.string({ minLength: 4, maxLength: 46 }).map(ba * - No partial authentication states exist */ describe('Authentication Atomicity Properties', () => { - let db: Database; + let db: SQLiteAdapter; let authService: AuthenticationService; let userService: UserService; const testJwtSecret = 'test-secret-key-for-testing-only'; // pragma: allowlist secret beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -54,12 +55,7 @@ describe('Authentication Atomicity Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -362,7 +358,7 @@ describe('Authentication Atomicity Properties', () => { }); // Helper function to initialize database schema -async function initializeSchema(db: Database): Promise { +async function initializeSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -455,16 +451,14 @@ async function initializeSchema(db: Database): Promise { INSERT INTO config (key, value, updatedAt) VALUES ('allow_self_registration', 'false', datetime('now')), ('default_new_user_role', 'role-viewer-001', datetime('now')); + + INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES + ('role-viewer-001', 'Viewer', 'Default viewer role', 1, datetime('now'), datetime('now')); `; const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); 
- else resolve(); - }); - }); + await db.execute(statement); } } diff --git a/backend/test/properties/auth/password-hashing.property.test.ts b/backend/test/properties/auth/password-hashing.property.test.ts index 156aa775..d2aa3a6c 100644 --- a/backend/test/properties/auth/password-hashing.property.test.ts +++ b/backend/test/properties/auth/password-hashing.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { AuthenticationService } from '../../../src/services/AuthenticationService'; import * as fc from 'fast-check'; @@ -20,13 +20,14 @@ import * as fc from 'fast-check'; * - Both hashes verify correctly against the original password */ describe('Password Hashing Properties', () => { - let db: Database; + let db: SQLiteAdapter; let authService: AuthenticationService; const testJwtSecret = 'test-secret-key-for-testing-only'; // pragma: allowlist secret beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize minimal schema (not needed for password hashing tests, but good practice) await initializeMinimalSchema(db); @@ -36,12 +37,7 @@ describe('Password Hashing Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -199,7 +195,7 @@ describe('Password Hashing Properties', () => { }); // Helper function to initialize minimal schema -async function initializeMinimalSchema(db: Database): Promise { +async function initializeMinimalSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -226,11 +222,6 @@ async function initializeMinimalSchema(db: Database): Promise { const statements = schema.split(';').map(s 
=> s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } diff --git a/backend/test/properties/auth/token-validity.property.test.ts b/backend/test/properties/auth/token-validity.property.test.ts index 95983840..ebdd55f6 100644 --- a/backend/test/properties/auth/token-validity.property.test.ts +++ b/backend/test/properties/auth/token-validity.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { AuthenticationService } from '../../../src/services/AuthenticationService'; import * as fc from 'fast-check'; import * as jwt from 'jsonwebtoken'; @@ -23,13 +23,14 @@ import * as jwt from 'jsonwebtoken'; * - Valid tokens can be verified */ describe('Token Validity Properties', () => { - let db: Database; + let db: SQLiteAdapter; let authService: AuthenticationService; const testJwtSecret = 'test-secret-key-for-testing-only'; // pragma: allowlist secret beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize minimal schema await initializeMinimalSchema(db); @@ -39,12 +40,7 @@ describe('Token Validity Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -284,7 +280,7 @@ describe('Token Validity Properties', () => { }); // Helper function to initialize minimal schema -async function initializeMinimalSchema(db: Database): Promise { +async function initializeMinimalSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -329,11 
+325,6 @@ async function initializeMinimalSchema(db: Database): Promise { const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } diff --git a/backend/test/properties/batch-execution/deduplication.property.test.ts b/backend/test/properties/batch-execution/deduplication.property.test.ts index c1a668a7..d9a43332 100644 --- a/backend/test/properties/batch-execution/deduplication.property.test.ts +++ b/backend/test/properties/batch-execution/deduplication.property.test.ts @@ -1,5 +1,6 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import sqlite3 from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../../../src/database/DatabaseAdapter'; import { BatchExecutionService } from '../../../src/services/BatchExecutionService'; import type { ExecutionQueue } from '../../../src/services/ExecutionQueue'; import type { ExecutionRepository } from '../../../src/database/ExecutionRepository'; @@ -10,398 +11,202 @@ import fc from 'fast-check'; * Property-Based Tests for Node Deduplication * * **Validates: Requirements 7.5** - * - * Property 2: Node deduplication is idempotent - * ∀ nodeIds ∈ Array: - * deduplicateNodes(nodeIds) = deduplicateNodes(deduplicateNodes(nodeIds)) - * - * This property validates that: - * - Applying deduplication multiple times produces the same result - * - The result contains no duplicates - * - All original node IDs are preserved (no data loss) - * - Edge cases work correctly (empty arrays, single elements, all duplicates) */ describe('Node Deduplication Properties', () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let service: BatchExecutionService; - let mockExecutionQueue: ExecutionQueue; - let 
mockExecutionRepository: ExecutionRepository; - let mockIntegrationManager: IntegrationManager; - - beforeEach(() => { - db = new sqlite3.Database(':memory:'); - mockExecutionQueue = {} as ExecutionQueue; - mockExecutionRepository = {} as ExecutionRepository; - mockIntegrationManager = {} as IntegrationManager; + beforeEach(async () => { + db = new SQLiteAdapter(':memory:'); + await db.initialize(); service = new BatchExecutionService( db, - mockExecutionQueue, - mockExecutionRepository, - mockIntegrationManager + {} as ExecutionQueue, + {} as ExecutionRepository, + {} as IntegrationManager ); }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); - /** - * Property 2: Node deduplication is idempotent - * - * **Validates: Requirements 7.5** - * - * This property test verifies that: - * 1. Applying deduplication multiple times produces the same result - * 2. The result contains no duplicates - * 3. All original node IDs are preserved (no data loss) - * 4. 
Order is preserved for first occurrence of each node - */ it('should be idempotent - applying deduplication multiple times produces same result', async () => { await fc.assert( fc.property( - // Generate random arrays of node IDs with potential duplicates - fc.array( - fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), - { minLength: 0, maxLength: 100 } - ), + fc.array(fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), { minLength: 0, maxLength: 100 }), (nodeIds) => { - // Apply deduplication once const deduplicatedOnce = (service as any).deduplicateNodes(nodeIds); - - // Apply deduplication twice const deduplicatedTwice = (service as any).deduplicateNodes(deduplicatedOnce); - - // Apply deduplication three times const deduplicatedThrice = (service as any).deduplicateNodes(deduplicatedTwice); - - // Property 1: Idempotency - all results should be identical expect(deduplicatedOnce).toEqual(deduplicatedTwice); expect(deduplicatedTwice).toEqual(deduplicatedThrice); - - // Property 2: No duplicates in result const uniqueSet = new Set(deduplicatedOnce); expect(deduplicatedOnce.length).toBe(uniqueSet.size); - - // Property 3: All original unique node IDs are preserved const originalUnique = new Set(nodeIds); const resultSet = new Set(deduplicatedOnce); expect(resultSet).toEqual(originalUnique); - - // Property 4: Result length should not exceed original length expect(deduplicatedOnce.length).toBeLessThanOrEqual(nodeIds.length); } ), - { - numRuns: 1000, - verbose: false - } + { numRuns: 1000, verbose: false } ); }); - /** - * Property: Deduplication preserves all unique elements - * - * **Validates: Requirements 7.5** - */ it('should preserve all unique node IDs without data loss', async () => { await fc.assert( fc.property( - fc.array( - fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), - { minLength: 1, maxLength: 100 } - ), + fc.array(fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), { minLength: 1, 
maxLength: 100 }), (nodeIds) => { const deduplicated = (service as any).deduplicateNodes(nodeIds); - - // Every unique node ID from input should be in output const originalUnique = new Set(nodeIds); const resultSet = new Set(deduplicated); - expect(resultSet.size).toBe(originalUnique.size); - for (const nodeId of originalUnique) { expect(resultSet.has(nodeId)).toBe(true); } } ), - { - numRuns: 500, - verbose: false - } + { numRuns: 500, verbose: false } ); }); - /** - * Property: Empty array handling - * - * **Validates: Requirements 7.5** - */ it('should handle empty arrays correctly', () => { const result = (service as any).deduplicateNodes([]); - expect(result).toEqual([]); expect(result.length).toBe(0); - - // Applying again should still be empty const resultAgain = (service as any).deduplicateNodes(result); expect(resultAgain).toEqual([]); }); - /** - * Property: Single element arrays - * - * **Validates: Requirements 7.5** - */ it('should handle single element arrays correctly', async () => { await fc.assert( fc.property( fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), (nodeId) => { const result = (service as any).deduplicateNodes([nodeId]); - expect(result).toEqual([nodeId]); expect(result.length).toBe(1); - - // Idempotency check const resultAgain = (service as any).deduplicateNodes(result); expect(resultAgain).toEqual([nodeId]); } ), - { - numRuns: 100, - verbose: false - } + { numRuns: 100, verbose: false } ); }); - /** - * Property: All duplicates scenario - * - * **Validates: Requirements 7.5** - */ it('should reduce array of all duplicates to single element', async () => { await fc.assert( fc.property( fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), fc.integer({ min: 1, max: 50 }), (nodeId, count) => { - // Create array with same node ID repeated const nodeIds = Array(count).fill(nodeId); - const result = (service as any).deduplicateNodes(nodeIds); - expect(result).toEqual([nodeId]); expect(result.length).toBe(1); - 
- // Idempotency check const resultAgain = (service as any).deduplicateNodes(result); expect(resultAgain).toEqual([nodeId]); } ), - { - numRuns: 200, - verbose: false - } + { numRuns: 200, verbose: false } ); }); - /** - * Property: Order preservation for first occurrence - * - * **Validates: Requirements 7.5** - */ it('should preserve order of first occurrence of each node', async () => { await fc.assert( fc.property( - fc.array( - fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), - { minLength: 2, maxLength: 50 } - ), + fc.array(fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), { minLength: 2, maxLength: 50 }), (nodeIds) => { const deduplicated = (service as any).deduplicateNodes(nodeIds); - - // Build expected order based on first occurrence const seen = new Set(); const expectedOrder: string[] = []; - for (const nodeId of nodeIds) { if (!seen.has(nodeId)) { seen.add(nodeId); expectedOrder.push(nodeId); } } - expect(deduplicated).toEqual(expectedOrder); } ), - { - numRuns: 500, - verbose: false - } + { numRuns: 500, verbose: false } ); }); - /** - * Property: Deduplication with high duplicate ratio - * - * **Validates: Requirements 7.5** - */ it('should handle arrays with high duplicate ratios efficiently', async () => { await fc.assert( fc.property( - // Generate a small set of unique IDs - fc.array( - fc.string({ minLength: 1, maxLength: 10 }).map(s => `node-${s}`), - { minLength: 1, maxLength: 10 } - ).chain(arr => { - // Ensure all elements are unique - const unique = [...new Set(arr)]; - return fc.constant(unique); - }), - // Generate many duplicates from that small set + fc.array(fc.string({ minLength: 1, maxLength: 10 }).map(s => `node-${s}`), { minLength: 1, maxLength: 10 }) + .chain(arr => { const unique = [...new Set(arr)]; return fc.constant(unique); }), fc.integer({ min: 50, max: 200 }), (uniqueNodeIds, totalCount) => { fc.pre(uniqueNodeIds.length > 0); - - // Create array with many duplicates using fc.constantFrom for 
determinism const nodeIds: string[] = []; for (let i = 0; i < totalCount; i++) { - const randomIndex = i % uniqueNodeIds.length; - nodeIds.push(uniqueNodeIds[randomIndex]); + nodeIds.push(uniqueNodeIds[i % uniqueNodeIds.length]); } - const deduplicated = (service as any).deduplicateNodes(nodeIds); - - // Should reduce to unique set const uniqueSet = new Set(uniqueNodeIds); const resultSet = new Set(deduplicated); - expect(resultSet).toEqual(uniqueSet); expect(deduplicated.length).toBe(uniqueSet.size); - - // Idempotency const deduplicatedAgain = (service as any).deduplicateNodes(deduplicated); expect(deduplicatedAgain).toEqual(deduplicated); } ), - { - numRuns: 200, - verbose: false - } + { numRuns: 200, verbose: false } ); }); - /** - * Property: Deduplication with no duplicates - * - * **Validates: Requirements 7.5** - */ it('should return identical array when no duplicates exist', async () => { await fc.assert( fc.property( - fc.array( - fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), - { minLength: 0, maxLength: 50 } - ).chain(arr => { - // Ensure all elements are unique - const unique = [...new Set(arr)]; - return fc.constant(unique); - }), + fc.array(fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), { minLength: 0, maxLength: 50 }) + .chain(arr => { const unique = [...new Set(arr)]; return fc.constant(unique); }), (nodeIds) => { const deduplicated = (service as any).deduplicateNodes(nodeIds); - - // Should be identical since no duplicates expect(deduplicated).toEqual(nodeIds); expect(deduplicated.length).toBe(nodeIds.length); - - // Idempotency const deduplicatedAgain = (service as any).deduplicateNodes(deduplicated); expect(deduplicatedAgain).toEqual(nodeIds); } ), - { - numRuns: 300, - verbose: false - } + { numRuns: 300, verbose: false } ); }); - /** - * Property: Deduplication is commutative with concatenation - * - * **Validates: Requirements 7.5** - */ it('should produce same result regardless of input order when 
combined', async () => { await fc.assert( fc.property( - fc.array( - fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), - { minLength: 1, maxLength: 30 } - ), - fc.array( - fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), - { minLength: 1, maxLength: 30 } - ), + fc.array(fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), { minLength: 1, maxLength: 30 }), + fc.array(fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), { minLength: 1, maxLength: 30 }), (nodeIds1, nodeIds2) => { - // Deduplicate concatenation in both orders const combined1 = (service as any).deduplicateNodes([...nodeIds1, ...nodeIds2]); const combined2 = (service as any).deduplicateNodes([...nodeIds2, ...nodeIds1]); - - // Both should contain the same unique elements (though order may differ) const set1 = new Set(combined1); const set2 = new Set(combined2); - expect(set1).toEqual(set2); expect(combined1.length).toBe(combined2.length); } ), - { - numRuns: 300, - verbose: false - } + { numRuns: 300, verbose: false } ); }); - /** - * Property: Deduplication result size bounds - * - * **Validates: Requirements 7.5** - */ it('should produce result with size between 0 and input length', async () => { await fc.assert( fc.property( - fc.array( - fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), - { minLength: 0, maxLength: 100 } - ), + fc.array(fc.string({ minLength: 1, maxLength: 20 }).map(s => `node-${s}`), { minLength: 0, maxLength: 100 }), (nodeIds) => { const deduplicated = (service as any).deduplicateNodes(nodeIds); - - // Result size should be within bounds expect(deduplicated.length).toBeGreaterThanOrEqual(0); expect(deduplicated.length).toBeLessThanOrEqual(nodeIds.length); - - // If input is empty, output is empty - if (nodeIds.length === 0) { - expect(deduplicated.length).toBe(0); - } - - // If input has elements, output has at least 1 (unless input was empty) - if (nodeIds.length > 0) { - 
expect(deduplicated.length).toBeGreaterThan(0); - } + if (nodeIds.length === 0) expect(deduplicated.length).toBe(0); + if (nodeIds.length > 0) expect(deduplicated.length).toBeGreaterThan(0); } ), - { - numRuns: 500, - verbose: false - } + { numRuns: 500, verbose: false } ); }); }); diff --git a/backend/test/properties/batch-execution/group-expansion.property.test.ts b/backend/test/properties/batch-execution/group-expansion.property.test.ts index a1440fd3..0a0bb503 100644 --- a/backend/test/properties/batch-execution/group-expansion.property.test.ts +++ b/backend/test/properties/batch-execution/group-expansion.property.test.ts @@ -1,5 +1,6 @@ import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; -import sqlite3 from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../../../src/database/DatabaseAdapter'; import { BatchExecutionService } from '../../../src/services/BatchExecutionService'; import type { ExecutionQueue } from '../../../src/services/ExecutionQueue'; import type { ExecutionRepository } from '../../../src/database/ExecutionRepository'; @@ -10,158 +11,81 @@ import fc from 'fast-check'; * Property-Based Tests for Group Expansion * * **Validates: Requirements 7.2, 7.3, 7.8** - * - * Property 1: Group expansion produces valid node IDs - * ∀ groups ∈ Groups, inventory ∈ Inventory: - * expandGroups(groups) ⟹ - * ∀ nodeId ∈ result: nodeId ∈ inventory.nodes ∧ - * result.length ≤ Σ(group.nodes.length) ∧ - * result = deduplicate(flatten(groups.map(g => g.nodes))) - * - * This property validates that: - * - All expanded node IDs exist in the inventory - * - Expansion handles linked groups (multiple sources) - * - Node IDs are properly deduplicated - * - Empty groups produce empty results - * - Missing groups are handled gracefully */ describe('Group Expansion Properties', () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let service: BatchExecutionService; - let 
mockExecutionQueue: ExecutionQueue; - let mockExecutionRepository: ExecutionRepository; let mockIntegrationManager: IntegrationManager; - beforeEach(() => { - db = new sqlite3.Database(':memory:'); - mockExecutionQueue = {} as ExecutionQueue; - mockExecutionRepository = {} as ExecutionRepository; + beforeEach(async () => { + db = new SQLiteAdapter(':memory:'); + await db.initialize(); mockIntegrationManager = { getAggregatedInventory: vi.fn(), } as unknown as IntegrationManager; service = new BatchExecutionService( db, - mockExecutionQueue, - mockExecutionRepository, + {} as ExecutionQueue, + {} as ExecutionRepository, mockIntegrationManager ); }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); - /** - * Property 1: Group expansion produces valid node IDs - * - * **Validates: Requirements 7.2, 7.3, 7.8** - * - * This property test verifies that: - * 1. All expanded node IDs exist in the inventory - * 2. Expansion handles groups from multiple sources (linked groups) - * 3. Node IDs are properly deduplicated - * 4. Empty groups produce empty results - * 5. 
Missing groups are handled gracefully (skipped) - */ it('should expand groups to valid node IDs that exist in inventory', async () => { await fc.assert( fc.asyncProperty( - // Generate random inventory structure generateInventoryArbitrary(), async (inventory) => { - // Mock the integration manager to return our generated inventory vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue(inventory); - - // Extract group IDs from the inventory const groupIds = inventory.groups.map(g => g.id); - - // If there are no groups, skip this test case fc.pre(groupIds.length > 0); - // Call the private expandGroups method const expandedNodeIds = await (service as any).expandGroups(groupIds); - - // Property 1: All expanded node IDs must exist in the inventory const inventoryNodeIds = new Set(inventory.nodes.map(n => n.id)); for (const nodeId of expandedNodeIds) { expect(inventoryNodeIds.has(nodeId)).toBe(true); } - - // Property 2: Expanded nodes should match the union of all group nodes const expectedNodeIds = new Set(); for (const group of inventory.groups) { for (const nodeId of group.nodes) { expectedNodeIds.add(nodeId); } } - - // Convert to sorted arrays for comparison const expandedSorted = [...new Set(expandedNodeIds)].sort(); const expectedSorted = [...expectedNodeIds].sort(); - expect(expandedSorted).toEqual(expectedSorted); - - // Property 3: Result length should not exceed sum of all group node counts const totalGroupNodes = inventory.groups.reduce((sum, g) => sum + g.nodes.length, 0); expect(expandedNodeIds.length).toBeLessThanOrEqual(totalGroupNodes); - - // Property 4: Deduplication - if we deduplicate again, result should be the same const deduplicatedOnce = [...new Set(expandedNodeIds)]; const deduplicatedTwice = [...new Set(deduplicatedOnce)]; expect(deduplicatedOnce).toEqual(deduplicatedTwice); } ), - { - numRuns: 100, - timeout: 30000, - verbose: false - } + { numRuns: 100, timeout: 30000, verbose: false } ); }, 60000); - /** - * Property 
2: Empty groups produce empty results - * - * **Validates: Requirements 7.2** - */ it('should return empty array when expanding empty groups', async () => { await fc.assert( fc.asyncProperty( - // Generate inventory with empty groups generateInventoryWithEmptyGroupsArbitrary(), async (inventory) => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue(inventory); - - const emptyGroupIds = inventory.groups - .filter(g => g.nodes.length === 0) - .map(g => g.id); - + const emptyGroupIds = inventory.groups.filter(g => g.nodes.length === 0).map(g => g.id); fc.pre(emptyGroupIds.length > 0); - const expandedNodeIds = await (service as any).expandGroups(emptyGroupIds); - - // Empty groups should produce empty results expect(expandedNodeIds).toEqual([]); } ), - { - numRuns: 50, - timeout: 30000 - } + { numRuns: 50, timeout: 30000 } ); }, 60000); - /** - * Property 3: Missing groups are handled gracefully - * - * **Validates: Requirements 7.6** - */ it('should skip missing groups and continue with valid ones', async () => { await fc.assert( fc.asyncProperty( @@ -169,154 +93,84 @@ describe('Group Expansion Properties', () => { fc.array(fc.string({ minLength: 1, maxLength: 20 }), { minLength: 1, maxLength: 5 }), async (inventory, missingGroupIds) => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue(inventory); - - // Ensure missing group IDs don't exist in inventory const existingGroupIds = new Set(inventory.groups.map(g => g.id)); const trulyMissingIds = missingGroupIds.filter(id => !existingGroupIds.has(id)); - fc.pre(trulyMissingIds.length > 0 && inventory.groups.length > 0); - - // Mix valid and missing group IDs const validGroupIds = inventory.groups.map(g => g.id); const mixedGroupIds = [...validGroupIds, ...trulyMissingIds]; - const expandedNodeIds = await (service as any).expandGroups(mixedGroupIds); - - // Should return nodes from valid groups only const expectedNodeIds = new Set(); for (const group of 
inventory.groups) { - for (const nodeId of group.nodes) { - expectedNodeIds.add(nodeId); - } + for (const nodeId of group.nodes) expectedNodeIds.add(nodeId); } - const expandedSet = new Set(expandedNodeIds); expect(expandedSet).toEqual(expectedNodeIds); } ), - { - numRuns: 50, - timeout: 30000 - } + { numRuns: 50, timeout: 30000 } ); }, 60000); - /** - * Property 4: Expansion is deterministic - * - * **Validates: Requirements 7.2** - */ it('should produce the same result when expanding the same groups multiple times', async () => { await fc.assert( fc.asyncProperty( generateInventoryArbitrary(), async (inventory) => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue(inventory); - const groupIds = inventory.groups.map(g => g.id); fc.pre(groupIds.length > 0); - - // Expand the same groups multiple times const result1 = await (service as any).expandGroups(groupIds); const result2 = await (service as any).expandGroups(groupIds); const result3 = await (service as any).expandGroups(groupIds); - - // Results should be identical (same order and content) expect(result1).toEqual(result2); expect(result2).toEqual(result3); } ), - { - numRuns: 50, - timeout: 30000 - } + { numRuns: 50, timeout: 30000 } ); }, 60000); - /** - * Property 5: Linked groups (multiple sources) are handled correctly - * - * **Validates: Requirements 7.4** - */ it('should handle linked groups from multiple sources', async () => { await fc.assert( fc.asyncProperty( generateInventoryWithLinkedGroupsArbitrary(), async (inventory) => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue(inventory); - - // Find groups that represent the same logical group from different sources - const linkedGroupIds = inventory.groups - .filter(g => g.name === 'production') // Groups with same name are "linked" - .map(g => g.id); - + const linkedGroupIds = inventory.groups.filter(g => g.name === 'production').map(g => g.id); fc.pre(linkedGroupIds.length > 1); - - // Expand 
all linked groups const expandedNodeIds = await (service as any).expandGroups(linkedGroupIds); - - // Should include all nodes from all sources const expectedNodeIds = new Set(); for (const group of inventory.groups.filter(g => g.name === 'production')) { - for (const nodeId of group.nodes) { - expectedNodeIds.add(nodeId); - } + for (const nodeId of group.nodes) expectedNodeIds.add(nodeId); } - const expandedSet = new Set(expandedNodeIds); expect(expandedSet).toEqual(expectedNodeIds); - - // All expanded nodes should exist in inventory const inventoryNodeIds = new Set(inventory.nodes.map(n => n.id)); for (const nodeId of expandedNodeIds) { expect(inventoryNodeIds.has(nodeId)).toBe(true); } } ), - { - numRuns: 50, - timeout: 30000 - } + { numRuns: 50, timeout: 30000 } ); }, 60000); - /** - * Property 6: Expansion handles overlapping groups correctly - * - * **Validates: Requirements 7.5** - */ it('should deduplicate nodes when groups overlap', async () => { await fc.assert( fc.asyncProperty( generateInventoryWithOverlappingGroupsArbitrary(), async (inventory) => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue(inventory); - const groupIds = inventory.groups.map(g => g.id); fc.pre(groupIds.length > 1); - const expandedNodeIds = await (service as any).expandGroups(groupIds); - - // Count occurrences of each node ID - const nodeCounts = new Map(); - for (const nodeId of expandedNodeIds) { - nodeCounts.set(nodeId, (nodeCounts.get(nodeId) || 0) + 1); - } - - // Each node should appear multiple times in the raw expansion - // (since groups overlap), but the result should contain duplicates - // Note: The current implementation doesn't deduplicate within expandGroups, - // that's done by deduplicateNodes. So we just verify all nodes are valid. 
const inventoryNodeIds = new Set(inventory.nodes.map(n => n.id)); for (const nodeId of expandedNodeIds) { expect(inventoryNodeIds.has(nodeId)).toBe(true); } } ), - { - numRuns: 50, - timeout: 30000 - } + { numRuns: 50, timeout: 30000 } ); }, 60000); }); @@ -325,9 +179,6 @@ describe('Group Expansion Properties', () => { // Arbitraries (Generators) // ============================================================================ -/** - * Generate a random inventory structure with nodes and groups - */ function generateInventoryArbitrary() { return fc.record({ nodes: fc.array( @@ -337,10 +188,7 @@ function generateInventoryArbitrary() { }), { minLength: 1, maxLength: 20 } ).chain(nodes => { - // Ensure unique node IDs - const uniqueNodes = Array.from( - new Map(nodes.map(n => [n.id, n])).values() - ); + const uniqueNodes = Array.from(new Map(nodes.map(n => [n.id, n])).values()); return fc.constant(uniqueNodes); }), groups: fc.array( @@ -348,33 +196,25 @@ function generateInventoryArbitrary() { id: fc.string({ minLength: 5, maxLength: 20 }).map(s => `group-${s}`), name: fc.string({ minLength: 5, maxLength: 20 }), source: fc.constantFrom('bolt', 'ansible', 'puppetdb', 'ssh'), - nodes: fc.constant([]), // Will be filled in next step + nodes: fc.constant([]), }), { minLength: 1, maxLength: 10 } ), }).chain(({ nodes, groups }) => { - // Assign random nodes to each group const nodeIds = nodes.map(n => n.id); - return fc.tuple( fc.constant(nodes), - ...groups.map(() => - fc.array(fc.constantFrom(...nodeIds), { minLength: 0, maxLength: Math.min(10, nodeIds.length) }) - ) + ...groups.map(() => fc.array(fc.constantFrom(...nodeIds), { minLength: 0, maxLength: Math.min(10, nodeIds.length) })) ).map(([nodes, ...groupNodeArrays]) => { const updatedGroups = groups.map((group, index) => ({ ...group, - nodes: [...new Set(groupNodeArrays[index])], // Deduplicate within group + nodes: [...new Set(groupNodeArrays[index])], })); - return { nodes, groups: updatedGroups }; }); }); } -/** - * 
Generate inventory with some empty groups - */ function generateInventoryWithEmptyGroupsArbitrary() { return fc.record({ nodes: fc.array( @@ -389,16 +229,13 @@ function generateInventoryWithEmptyGroupsArbitrary() { id: fc.string({ minLength: 5, maxLength: 20 }).map(s => `group-${s}`), name: fc.string({ minLength: 5, maxLength: 20 }), source: fc.constantFrom('bolt', 'ansible', 'puppetdb', 'ssh'), - nodes: fc.constant([]), // Empty groups + nodes: fc.constant([]), }), { minLength: 1, maxLength: 5 } ), }); } -/** - * Generate inventory with linked groups (same name, different sources) - */ function generateInventoryWithLinkedGroupsArbitrary() { return fc.record({ nodes: fc.array( @@ -408,37 +245,27 @@ function generateInventoryWithLinkedGroupsArbitrary() { }), { minLength: 3, maxLength: 20 } ).chain(nodes => { - const uniqueNodes = Array.from( - new Map(nodes.map(n => [n.id, n])).values() - ); + const uniqueNodes = Array.from(new Map(nodes.map(n => [n.id, n])).values()); return fc.constant(uniqueNodes); }), }).chain(({ nodes }) => { const nodeIds = nodes.map(n => n.id); const sources = ['bolt', 'ansible', 'puppetdb']; - - // Create linked groups with the same name but different sources return fc.tuple( fc.constant(nodes), - ...sources.map(() => - fc.array(fc.constantFrom(...nodeIds), { minLength: 1, maxLength: Math.min(5, nodeIds.length) }) - ) + ...sources.map(() => fc.array(fc.constantFrom(...nodeIds), { minLength: 1, maxLength: Math.min(5, nodeIds.length) })) ).map(([nodes, ...groupNodeArrays]) => { const groups = sources.map((source, index) => ({ id: `group-production-${source}`, - name: 'production', // Same name for linked groups + name: 'production', source, nodes: [...new Set(groupNodeArrays[index])], })); - return { nodes, groups }; }); }); } -/** - * Generate inventory with overlapping groups (groups share some nodes) - */ function generateInventoryWithOverlappingGroupsArbitrary() { return fc.record({ nodes: fc.array( @@ -448,15 +275,11 @@ function 
generateInventoryWithOverlappingGroupsArbitrary() { }), { minLength: 5, maxLength: 20 } ).chain(nodes => { - const uniqueNodes = Array.from( - new Map(nodes.map(n => [n.id, n])).values() - ); + const uniqueNodes = Array.from(new Map(nodes.map(n => [n.id, n])).values()); return fc.constant(uniqueNodes); }), }).chain(({ nodes }) => { const nodeIds = nodes.map(n => n.id); - - // Create groups that intentionally overlap return fc.tuple( fc.constant(nodes), fc.array(fc.constantFrom(...nodeIds), { minLength: 2, maxLength: Math.min(8, nodeIds.length) }), @@ -464,26 +287,10 @@ function generateInventoryWithOverlappingGroupsArbitrary() { fc.array(fc.constantFrom(...nodeIds), { minLength: 2, maxLength: Math.min(8, nodeIds.length) }) ).map(([nodes, group1Nodes, group2Nodes, group3Nodes]) => { const groups = [ - { - id: 'group-web', - name: 'web-servers', - source: 'bolt', - nodes: [...new Set(group1Nodes)], - }, - { - id: 'group-app', - name: 'app-servers', - source: 'bolt', - nodes: [...new Set(group2Nodes)], - }, - { - id: 'group-prod', - name: 'production', - source: 'ansible', - nodes: [...new Set(group3Nodes)], - }, + { id: 'group-web', name: 'web-servers', source: 'bolt', nodes: [...new Set(group1Nodes)] }, + { id: 'group-app', name: 'app-servers', source: 'bolt', nodes: [...new Set(group2Nodes)] }, + { id: 'group-prod', name: 'production', source: 'ansible', nodes: [...new Set(group3Nodes)] }, ]; - return { nodes, groups }; }); }); diff --git a/backend/test/properties/expert-mode/property-6.test.ts b/backend/test/properties/expert-mode/property-6.test.ts index b7755634..43548e43 100644 --- a/backend/test/properties/expert-mode/property-6.test.ts +++ b/backend/test/properties/expert-mode/property-6.test.ts @@ -482,7 +482,7 @@ describe('Property 6: Debug Info Completeness', () => { operationArb, requestIdArb, durationArb, - fc.array(fc.tuple(fc.string(), fc.anything()), { minLength: 1, maxLength: 10 }), + fc.array(fc.tuple(fc.string().filter(k => k !== '__proto__' && k 
!== 'constructor' && k !== 'prototype'), fc.anything()), { minLength: 1, maxLength: 10 }), (operation, requestId, duration, metadataEntries) => { const debugInfo = service.createDebugInfo(operation, requestId, duration); diff --git a/backend/test/properties/rbac/admin-privilege.property.test.ts b/backend/test/properties/rbac/admin-privilege.property.test.ts index fac2e775..36284393 100644 --- a/backend/test/properties/rbac/admin-privilege.property.test.ts +++ b/backend/test/properties/rbac/admin-privilege.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { UserService } from '../../../src/services/UserService'; import { PermissionService } from '../../../src/services/PermissionService'; import { AuthenticationService } from '../../../src/services/AuthenticationService'; @@ -22,7 +22,7 @@ import { randomUUID } from 'crypto'; * - Admin users bypass normal permission checks */ describe('Admin Privilege Properties', () => { - let db: Database; + let db: SQLiteAdapter; let userService: UserService; let permissionService: PermissionService; let authService: AuthenticationService; @@ -30,7 +30,8 @@ describe('Admin Privilege Properties', () => { beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeRBACSchema(db); @@ -42,12 +43,7 @@ describe('Admin Privilege Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -395,7 +391,7 @@ describe('Admin Privilege Properties', () => { // Helper functions -async function initializeRBACSchema(db: Database): Promise { +async function initializeRBACSchema(db: SQLiteAdapter): Promise { const schema = ` 
CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -484,16 +480,11 @@ async function initializeRBACSchema(db: Database): Promise { const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } -async function createTestUser(db: Database, userId: string, isAdmin: boolean = false): Promise { +async function createTestUser(db: SQLiteAdapter, userId: string, isAdmin: boolean = false): Promise { const sql = ` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, 1, ?, ?, ?) @@ -512,10 +503,5 @@ async function createTestUser(db: Database, userId: string, isAdmin: boolean = f now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } diff --git a/backend/test/properties/rbac/group-permission-inheritance.property.test.ts b/backend/test/properties/rbac/group-permission-inheritance.property.test.ts index 6989d932..18493d61 100644 --- a/backend/test/properties/rbac/group-permission-inheritance.property.test.ts +++ b/backend/test/properties/rbac/group-permission-inheritance.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { UserService } from '../../../src/services/UserService'; import { GroupService } from '../../../src/services/GroupService'; import { RoleService } from '../../../src/services/RoleService'; @@ -26,7 +26,7 @@ import { randomUUID } from 'crypto'; * - Group-based permission inheritance works correctly */ describe('Group Permission Inheritance Properties', () => { - let db: 
Database; + let db: SQLiteAdapter; let userService: UserService; let groupService: GroupService; let roleService: RoleService; @@ -36,7 +36,8 @@ describe('Group Permission Inheritance Properties', () => { beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeRBACSchema(db); @@ -50,12 +51,7 @@ describe('Group Permission Inheritance Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -526,7 +522,7 @@ describe('Group Permission Inheritance Properties', () => { // Helper functions -async function initializeRBACSchema(db: Database): Promise { +async function initializeRBACSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -615,16 +611,11 @@ async function initializeRBACSchema(db: Database): Promise { const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } -async function createTestUser(db: Database, userId: string): Promise { +async function createTestUser(db: SQLiteAdapter, userId: string): Promise { const sql = ` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, 1, 0, ?, ?) 
@@ -642,15 +633,10 @@ async function createTestUser(db: Database, userId: string): Promise { now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } -async function createTestGroup(db: Database, groupId: string, name?: string): Promise { +async function createTestGroup(db: SQLiteAdapter, groupId: string, name?: string): Promise { const sql = ` INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?) @@ -666,15 +652,10 @@ async function createTestGroup(db: Database, groupId: string, name?: string): Pr now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } -async function createTestRole(db: Database, roleId: string, name?: string): Promise { +async function createTestRole(db: SQLiteAdapter, roleId: string, name?: string): Promise { const sql = ` INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, 0, ?, ?) 
@@ -690,16 +671,11 @@ async function createTestRole(db: Database, roleId: string, name?: string): Prom now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } async function createTestPermission( - db: Database, + db: SQLiteAdapter, permissionId: string, resource: string, action: string @@ -718,10 +694,5 @@ async function createTestPermission( now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } diff --git a/backend/test/properties/rbac/inactive-user-denial.property.test.ts b/backend/test/properties/rbac/inactive-user-denial.property.test.ts index 996eef96..64e2004c 100644 --- a/backend/test/properties/rbac/inactive-user-denial.property.test.ts +++ b/backend/test/properties/rbac/inactive-user-denial.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { UserService } from '../../../src/services/UserService'; import { RoleService } from '../../../src/services/RoleService'; import { PermissionService } from '../../../src/services/PermissionService'; @@ -23,7 +23,7 @@ import { randomUUID } from 'crypto'; * - Deactivated users lose all access */ describe('Inactive User Denial Properties', () => { - let db: Database; + let db: SQLiteAdapter; let userService: UserService; let roleService: RoleService; let permissionService: PermissionService; @@ -32,7 +32,8 @@ describe('Inactive User Denial Properties', () => { beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeRBACSchema(db); @@ -45,12 +46,7 @@ describe('Inactive User Denial Properties', () => { 
}); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -445,7 +441,7 @@ describe('Inactive User Denial Properties', () => { // Helper functions -async function initializeRBACSchema(db: Database): Promise { +async function initializeRBACSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -534,17 +530,12 @@ async function initializeRBACSchema(db: Database): Promise { const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } async function createTestUser( - db: Database, + db: SQLiteAdapter, userId: string, isActive: boolean = true, isAdmin: boolean = false @@ -568,15 +559,10 @@ async function createTestUser( now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } -async function createTestRole(db: Database, roleId: string, name?: string): Promise { +async function createTestRole(db: SQLiteAdapter, roleId: string, name?: string): Promise { const sql = ` INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, 0, ?, ?) 
@@ -592,16 +578,11 @@ async function createTestRole(db: Database, roleId: string, name?: string): Prom now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } async function createTestPermission( - db: Database, + db: SQLiteAdapter, permissionId: string, resource: string, action: string @@ -620,15 +601,10 @@ async function createTestPermission( now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } -async function createTestGroup(db: Database, groupId: string): Promise { +async function createTestGroup(db: SQLiteAdapter, groupId: string): Promise { const sql = ` INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?) @@ -643,15 +619,10 @@ async function createTestGroup(db: Database, groupId: string): Promise { now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } -async function assignRoleToGroup(db: Database, groupId: string, roleId: string): Promise { +async function assignRoleToGroup(db: SQLiteAdapter, groupId: string, roleId: string): Promise { const sql = ` INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?) 
@@ -660,10 +631,5 @@ async function assignRoleToGroup(db: Database, groupId: string, roleId: string): const now = new Date().toISOString(); const params = [groupId, roleId, now]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } diff --git a/backend/test/properties/rbac/permission-monotonicity.property.test.ts b/backend/test/properties/rbac/permission-monotonicity.property.test.ts new file mode 100644 index 00000000..9114bf89 --- /dev/null +++ b/backend/test/properties/rbac/permission-monotonicity.property.test.ts @@ -0,0 +1,328 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; +import { RoleService } from '../../../src/services/RoleService'; +import { PermissionService } from '../../../src/services/PermissionService'; +import * as fc from 'fast-check'; +import { randomUUID } from 'crypto'; + +/** + * Property-Based Tests for Permission Monotonicity + * + * **Validates: Requirement 29.2** + * + * Property 19: Permission Monotonicity + * For any role, adding a permission to that role never removes existing + * permissions, and removing a permission never adds new ones. The set of + * permissions only changes by the explicitly added or removed permission. 
+ */ +describe('Permission Monotonicity Properties', () => { + let db: SQLiteAdapter; + let roleService: RoleService; + let permissionService: PermissionService; + + beforeEach(async () => { + db = new SQLiteAdapter(':memory:'); + await db.initialize(); + await initializeRBACSchema(db); + roleService = new RoleService(db); + permissionService = new PermissionService(db); + }); + + afterEach(async () => { + await db.close(); + }); + + /** + * Adding a permission preserves all existing permissions (core monotonicity - add) + * + * **Validates: Requirement 29.2** + */ + it('adding a permission preserves all existing permissions', async () => { + await fc.assert( + fc.asyncProperty( + fc.integer({ min: 1, max: 5 }), + async (numExisting) => { + const roleId = randomUUID(); + await createTestRole(db, roleId); + + // Create and assign existing permissions + const existingPermIds: string[] = []; + for (let i = 0; i < numExisting; i++) { + const pId = randomUUID(); + await createTestPermission(db, pId, `res_${pId.substring(0, 8)}`, `act_${pId.substring(0, 8)}`); + await roleService.assignPermissionToRole(roleId, pId); + existingPermIds.push(pId); + } + + // Snapshot permissions before adding + const before = await roleService.getRolePermissions(roleId); + const beforeIds = new Set(before.map(p => p.id)); + + // Add a new permission + const newPermId = randomUUID(); + await createTestPermission(db, newPermId, `res_${newPermId.substring(0, 8)}`, `act_${newPermId.substring(0, 8)}`); + await roleService.assignPermissionToRole(roleId, newPermId); + + // Snapshot permissions after adding + const after = await roleService.getRolePermissions(roleId); + const afterIds = new Set(after.map(p => p.id)); + + // Property: every permission that existed before must still exist + for (const id of beforeIds) { + expect(afterIds.has(id)).toBe(true); + } + + // The new permission should also be present + expect(afterIds.has(newPermId)).toBe(true); + } + ), + { numRuns: 30, timeout: 30000 } + 
); + }, 60000); + + /** + * Removing a permission does not introduce new permissions (core monotonicity - remove) + * + * **Validates: Requirement 29.2** + */ + it('removing a permission does not introduce new permissions', async () => { + await fc.assert( + fc.asyncProperty( + fc.integer({ min: 2, max: 5 }), + async (numPermissions) => { + const roleId = randomUUID(); + await createTestRole(db, roleId); + + // Create and assign permissions + const permIds: string[] = []; + for (let i = 0; i < numPermissions; i++) { + const pId = randomUUID(); + await createTestPermission(db, pId, `res_${pId.substring(0, 8)}`, `act_${pId.substring(0, 8)}`); + await roleService.assignPermissionToRole(roleId, pId); + permIds.push(pId); + } + + // Snapshot before removal + const before = await roleService.getRolePermissions(roleId); + const beforeIds = new Set(before.map(p => p.id)); + + // Remove the first permission + const removedId = permIds[0]; + await roleService.removePermissionFromRole(roleId, removedId); + + // Snapshot after removal + const after = await roleService.getRolePermissions(roleId); + const afterIds = new Set(after.map(p => p.id)); + + // Property: no new permissions should appear + for (const id of afterIds) { + expect(beforeIds.has(id)).toBe(true); + } + + // The removed permission should be gone + expect(afterIds.has(removedId)).toBe(false); + } + ), + { numRuns: 30, timeout: 30000 } + ); + }, 60000); + + /** + * Adding then removing a permission returns to original state (round-trip) + * + * **Validates: Requirement 29.2** + */ + it('adding then removing a permission returns to original state', async () => { + await fc.assert( + fc.asyncProperty( + fc.integer({ min: 1, max: 4 }), + async (numExisting) => { + const roleId = randomUUID(); + await createTestRole(db, roleId); + + // Create and assign initial permissions + for (let i = 0; i < numExisting; i++) { + const pId = randomUUID(); + await createTestPermission(db, pId, `res_${pId.substring(0, 8)}`, 
`act_${pId.substring(0, 8)}`); + await roleService.assignPermissionToRole(roleId, pId); + } + + // Snapshot original state + const original = await roleService.getRolePermissions(roleId); + const originalIds = new Set(original.map(p => p.id)); + + // Add a new permission + const newPermId = randomUUID(); + await createTestPermission(db, newPermId, `res_${newPermId.substring(0, 8)}`, `act_${newPermId.substring(0, 8)}`); + await roleService.assignPermissionToRole(roleId, newPermId); + + // Remove the same permission + await roleService.removePermissionFromRole(roleId, newPermId); + + // Snapshot final state + const final_ = await roleService.getRolePermissions(roleId); + const finalIds = new Set(final_.map(p => p.id)); + + // Property: final state equals original state + expect(finalIds.size).toBe(originalIds.size); + for (const id of originalIds) { + expect(finalIds.has(id)).toBe(true); + } + } + ), + { numRuns: 30, timeout: 30000 } + ); + }, 60000); + + /** + * Multiple sequential adds preserve all prior permissions + * + * **Validates: Requirement 29.2** + */ + it('multiple sequential adds preserve all prior permissions', async () => { + await fc.assert( + fc.asyncProperty( + fc.integer({ min: 2, max: 6 }), + async (numAdds) => { + const roleId = randomUUID(); + await createTestRole(db, roleId); + + const allPermIds: string[] = []; + + for (let i = 0; i < numAdds; i++) { + const pId = randomUUID(); + await createTestPermission(db, pId, `res_${pId.substring(0, 8)}`, `act_${pId.substring(0, 8)}`); + await roleService.assignPermissionToRole(roleId, pId); + allPermIds.push(pId); + + // After each add, verify all previously added permissions still exist + const current = await roleService.getRolePermissions(roleId); + const currentIds = new Set(current.map(p => p.id)); + + for (const prevId of allPermIds) { + expect(currentIds.has(prevId)).toBe(true); + } + } + } + ), + { numRuns: 20, timeout: 30000 } + ); + }, 60000); +}); + +// Helper functions + +async function 
initializeRBACSchema(db: SQLiteAdapter): Promise { + const schema = ` + CREATE TABLE IF NOT EXISTS users ( + id TEXT PRIMARY KEY, + username TEXT NOT NULL UNIQUE, + email TEXT NOT NULL UNIQUE, + passwordHash TEXT NOT NULL, + firstName TEXT NOT NULL, + lastName TEXT NOT NULL, + isActive INTEGER NOT NULL DEFAULT 1, + isAdmin INTEGER NOT NULL DEFAULT 0, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL, + lastLoginAt TEXT + ); + + CREATE TABLE IF NOT EXISTS roles ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL UNIQUE, + description TEXT NOT NULL, + isBuiltIn INTEGER NOT NULL DEFAULT 0, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS permissions ( + id TEXT PRIMARY KEY, + resource TEXT NOT NULL, + action TEXT NOT NULL, + description TEXT NOT NULL, + createdAt TEXT NOT NULL, + UNIQUE(resource, action) + ); + + CREATE TABLE IF NOT EXISTS user_roles ( + userId TEXT NOT NULL, + roleId TEXT NOT NULL, + assignedAt TEXT NOT NULL, + PRIMARY KEY (userId, roleId), + FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, + FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS role_permissions ( + roleId TEXT NOT NULL, + permissionId TEXT NOT NULL, + assignedAt TEXT NOT NULL, + PRIMARY KEY (roleId, permissionId), + FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, + FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS groups ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL UNIQUE, + description TEXT NOT NULL, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS user_groups ( + userId TEXT NOT NULL, + groupId TEXT NOT NULL, + assignedAt TEXT NOT NULL, + PRIMARY KEY (userId, groupId), + FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, + FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS group_roles ( + groupId TEXT NOT NULL, + roleId TEXT 
NOT NULL, + assignedAt TEXT NOT NULL, + PRIMARY KEY (groupId, roleId), + FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, + FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS revoked_tokens ( + token TEXT PRIMARY KEY, + userId TEXT NOT NULL, + revokedAt TEXT NOT NULL, + expiresAt TEXT NOT NULL + ) + `; + + const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); + for (const statement of statements) { + await db.execute(statement); + } +} + +async function createTestRole(db: SQLiteAdapter, roleId: string, name?: string): Promise { + const now = new Date().toISOString(); + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + VALUES (?, ?, ?, 0, ?, ?)`, + [roleId, name || `role_${roleId.substring(0, 8)}`, 'Test role', now, now] + ); +} + +async function createTestPermission( + db: SQLiteAdapter, + permissionId: string, + resource: string, + action: string +): Promise { + const now = new Date().toISOString(); + await db.execute( + `INSERT INTO permissions (id, resource, "action", description, createdAt) + VALUES (?, ?, ?, ?, ?)`, + [permissionId, resource, action, `Test permission for ${resource}:${action}`, now] + ); +} diff --git a/backend/test/properties/rbac/permission-transitivity.property.test.ts b/backend/test/properties/rbac/permission-transitivity.property.test.ts index 32cc3a97..8e9a35a8 100644 --- a/backend/test/properties/rbac/permission-transitivity.property.test.ts +++ b/backend/test/properties/rbac/permission-transitivity.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { UserService } from '../../../src/services/UserService'; import { RoleService } from '../../../src/services/RoleService'; import { PermissionService } from 
'../../../src/services/PermissionService'; @@ -23,7 +23,7 @@ import { randomUUID } from 'crypto'; * - Permission inheritance is transitive */ describe('Permission Transitivity Properties', () => { - let db: Database; + let db: SQLiteAdapter; let userService: UserService; let roleService: RoleService; let permissionService: PermissionService; @@ -32,7 +32,8 @@ describe('Permission Transitivity Properties', () => { beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeRBACSchema(db); @@ -45,12 +46,7 @@ describe('Permission Transitivity Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -375,7 +371,7 @@ describe('Permission Transitivity Properties', () => { // Helper functions -async function initializeRBACSchema(db: Database): Promise { +async function initializeRBACSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -464,16 +460,11 @@ async function initializeRBACSchema(db: Database): Promise { const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } -async function createTestUser(db: Database, userId: string): Promise { +async function createTestUser(db: SQLiteAdapter, userId: string): Promise { const sql = ` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, 1, 0, ?, ?) 
@@ -491,15 +482,10 @@ async function createTestUser(db: Database, userId: string): Promise { now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } -async function createTestRole(db: Database, roleId: string, name?: string): Promise { +async function createTestRole(db: SQLiteAdapter, roleId: string, name?: string): Promise { const sql = ` INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, 0, ?, ?) @@ -515,16 +501,11 @@ async function createTestRole(db: Database, roleId: string, name?: string): Prom now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } async function createTestPermission( - db: Database, + db: SQLiteAdapter, permissionId: string, resource: string, action: string @@ -543,10 +524,5 @@ async function createTestPermission( now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } diff --git a/backend/test/properties/rbac/role-assignment-idempotence.property.test.ts b/backend/test/properties/rbac/role-assignment-idempotence.property.test.ts index 58558d44..9eed506c 100644 --- a/backend/test/properties/rbac/role-assignment-idempotence.property.test.ts +++ b/backend/test/properties/rbac/role-assignment-idempotence.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { UserService } from '../../../src/services/UserService'; import { AuthenticationService } from '../../../src/services/AuthenticationService'; import * as fc from 'fast-check'; @@ -21,14 +21,15 @@ import { randomUUID } from 'crypto'; * - No duplicate 
role assignments exist */ describe('Role Assignment Idempotence Properties', () => { - let db: Database; + let db: SQLiteAdapter; let userService: UserService; let authService: AuthenticationService; const testJwtSecret = 'test-secret-key-for-testing-only'; // pragma: allowlist secret beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeRBACSchema(db); @@ -39,12 +40,7 @@ describe('Role Assignment Idempotence Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -251,7 +247,7 @@ describe('Role Assignment Idempotence Properties', () => { // Helper functions -async function initializeRBACSchema(db: Database): Promise { +async function initializeRBACSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -296,16 +292,11 @@ async function initializeRBACSchema(db: Database): Promise { const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } -async function createTestUser(db: Database, userId: string): Promise { +async function createTestUser(db: SQLiteAdapter, userId: string): Promise { const sql = ` INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, 1, 0, ?, ?) 
@@ -323,15 +314,10 @@ async function createTestUser(db: Database, userId: string): Promise { now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } -async function createTestRole(db: Database, roleId: string, name?: string): Promise { +async function createTestRole(db: SQLiteAdapter, roleId: string, name?: string): Promise { const sql = ` INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, 0, ?, ?) @@ -348,25 +334,16 @@ async function createTestRole(db: Database, roleId: string, name?: string): Prom now ]; - await new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(sql, params); } async function queryUserRoleAssignments( - db: Database, + db: SQLiteAdapter, userId: string, roleId: string ): Promise { const sql = 'SELECT COUNT(*) as count FROM user_roles WHERE userId = ? AND roleId = ?'; // pragma: allowlist secret - return new Promise((resolve, reject) => { - db.get(sql, [userId, roleId], (err, row: any) => { - if (err) reject(err); - else resolve(row.count); - }); - }); + const row = await db.queryOne<{ count: number }>(sql, [userId, roleId]); + return row?.count ?? 
0; } diff --git a/backend/test/properties/user/email-uniqueness.property.test.ts b/backend/test/properties/user/email-uniqueness.property.test.ts index df2e3aa2..c31f4a32 100644 --- a/backend/test/properties/user/email-uniqueness.property.test.ts +++ b/backend/test/properties/user/email-uniqueness.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { UserService } from '../../../src/services/UserService'; import { AuthenticationService } from '../../../src/services/AuthenticationService'; import * as fc from 'fast-check'; @@ -19,14 +19,15 @@ import * as fc from 'fast-check'; * - No two users can have the same email address */ describe('Email Uniqueness Properties', () => { - let db: Database; + let db: SQLiteAdapter; let authService: AuthenticationService; let userService: UserService; const testJwtSecret = 'test-secret-key-for-testing-only'; // pragma: allowlist secret beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -37,12 +38,7 @@ describe('Email Uniqueness Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.close(); }); /** @@ -220,7 +216,7 @@ describe('Email Uniqueness Properties', () => { }); // Helper function to initialize schema -async function initializeSchema(db: Database): Promise { +async function initializeSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -317,16 +313,14 @@ async function initializeSchema(db: Database): Promise { INSERT INTO config (key, value, updatedAt) VALUES ('allow_self_registration', 'false', datetime('now')), 
('default_new_user_role', 'role-viewer-001', datetime('now')); + + INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES + ('role-viewer-001', 'Viewer', 'Default viewer role', 1, datetime('now'), datetime('now')); `; const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } diff --git a/backend/test/properties/user/username-uniqueness.property.test.ts b/backend/test/properties/user/username-uniqueness.property.test.ts index 8e78ad0c..0f6a655f 100644 --- a/backend/test/properties/user/username-uniqueness.property.test.ts +++ b/backend/test/properties/user/username-uniqueness.property.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; +import { SQLiteAdapter } from '../../../src/database/SQLiteAdapter'; import { UserService } from '../../../src/services/UserService'; import { AuthenticationService } from '../../../src/services/AuthenticationService'; import * as fc from 'fast-check'; @@ -19,14 +19,15 @@ import * as fc from 'fast-check'; * - No two users can have the same username */ describe('Username Uniqueness Properties', () => { - let db: Database; + let db: SQLiteAdapter; let authService: AuthenticationService; let userService: UserService; const testJwtSecret = 'test-secret-key-for-testing-only'; // pragma: allowlist secret beforeEach(async () => { // Create in-memory database - db = new Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await initializeSchema(db); @@ -37,12 +38,7 @@ describe('Username Uniqueness Properties', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + 
await db.close(); }); /** @@ -220,7 +216,7 @@ describe('Username Uniqueness Properties', () => { }); // Helper function to initialize schema -async function initializeSchema(db: Database): Promise { +async function initializeSchema(db: SQLiteAdapter): Promise { const schema = ` CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, @@ -317,16 +313,14 @@ async function initializeSchema(db: Database): Promise { INSERT INTO config (key, value, updatedAt) VALUES ('allow_self_registration', 'false', datetime('now')), ('default_new_user_role', 'role-viewer-001', datetime('now')); + + INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES + ('role-viewer-001', 'Viewer', 'Default viewer role', 1, datetime('now'), datetime('now')); `; const statements = schema.split(';').map(s => s.trim()).filter(s => s.length > 0); for (const statement of statements) { - await new Promise((resolve, reject) => { - db.run(statement, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute(statement); } } diff --git a/backend/test/routes/auth.test.ts b/backend/test/routes/auth.test.ts index 657eb74a..e0a4cefe 100644 --- a/backend/test/routes/auth.test.ts +++ b/backend/test/routes/auth.test.ts @@ -4,8 +4,6 @@ import request from 'supertest'; import { createAuthRouter } from '../../src/routes/auth'; import { DatabaseService } from '../../src/database/DatabaseService'; import { SetupService } from '../../src/services/SetupService'; -import { Database } from 'sqlite3'; -import { initializeSchema } from '../../src/database/schema'; describe('Auth Routes - POST /api/auth/register', () => { let app: Express; @@ -608,21 +606,22 @@ describe('Auth Routes - POST /api/auth/register', () => { }); it('should handle database errors gracefully', async () => { - // Create a new database instance that we can close - const tempDb = new Database(':memory:'); - await initializeSchema(tempDb); - - const tempDatabaseService = { - getConnection: () => tempDb, - 
isInitialized: () => true, - } as DatabaseService; + // Create a temporary database service that we can close + const tempDatabaseService = new DatabaseService(':memory:'); + await tempDatabaseService.initialize(); + + const tempSetupService = new SetupService(tempDatabaseService.getConnection()); + await tempSetupService.saveConfig({ + allowSelfRegistration: true, + defaultNewUserRole: null, + }); const tempApp = express(); tempApp.use(express.json()); tempApp.use('/api/auth', createAuthRouter(tempDatabaseService)); // Close the database to simulate error - await closeDatabase(tempDb); + await tempDatabaseService.close(); const userData = { username: 'testuser', @@ -642,151 +641,6 @@ describe('Auth Routes - POST /api/auth/register', () => { }); }); -// Helper functions -async function initializeSchema(db: Database): Promise { - return new Promise((resolve, reject) => { - db.exec(` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT UNIQUE NOT NULL, - email TEXT UNIQUE NOT NULL, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER DEFAULT 1, - isAdmin INTEGER DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - description TEXT, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT UNIQUE NOT NULL, - description TEXT, - isBuiltIn INTEGER DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE permissions ( - id TEXT PRIMARY KEY, - resource TEXT NOT NULL, - action TEXT NOT NULL, - description TEXT, - createdAt TEXT NOT NULL, - UNIQUE(resource, action) - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id), - FOREIGN KEY (groupId) REFERENCES groups(id) - ); - - CREATE 
TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id), - FOREIGN KEY (roleId) REFERENCES roles(id) - ); - - CREATE TABLE group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id), - FOREIGN KEY (roleId) REFERENCES roles(id) - ); - - CREATE TABLE role_permissions ( - roleId TEXT NOT NULL, - permissionId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (roleId, permissionId), - FOREIGN KEY (roleId) REFERENCES roles(id), - FOREIGN KEY (permissionId) REFERENCES permissions(id) - ); - - CREATE TABLE revoked_tokens ( - token TEXT PRIMARY KEY, - userId TEXT NOT NULL, - revokedAt TEXT NOT NULL, - expiresAt TEXT NOT NULL - ); - - CREATE TABLE account_lockouts ( - username TEXT PRIMARY KEY, - lockoutType TEXT NOT NULL, - lockedAt TEXT NOT NULL, - lockedUntil TEXT, - failedAttempts INTEGER NOT NULL, - lastAttemptAt TEXT NOT NULL - ); - - CREATE TABLE failed_login_attempts ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - username TEXT NOT NULL, - attemptedAt TEXT NOT NULL, - ipAddress TEXT, - reason TEXT NOT NULL - ); - - CREATE TABLE audit_logs ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - timestamp TEXT NOT NULL, - userId TEXT, - username TEXT, - action TEXT NOT NULL, - resource TEXT, - resourceId TEXT, - ipAddress TEXT, - userAgent TEXT, - success INTEGER NOT NULL, - errorMessage TEXT, - metadata TEXT - ); - - CREATE TABLE config ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - -- Insert default config to allow self-registration for tests - INSERT INTO config (key, value, updatedAt) VALUES - ('allow_self_registration', 'true', datetime('now')), - ('default_new_user_role', 'role-viewer-001', datetime('now')); - `, (err) => { - if (err) reject(err); - else resolve(); - }); - }); -} - -async function 
closeDatabase(db: Database): Promise { - return new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - }); -} - describe('Auth Routes - POST /api/auth/login', () => { let app: Express; let databaseService: DatabaseService; @@ -1009,16 +863,10 @@ describe('Auth Routes - POST /api/auth/login', () => { .expect(201); // Deactivate the user directly in database - await new Promise((resolve, reject) => { - databaseService.getConnection().run( - 'UPDATE users SET isActive = 0 WHERE username = ?', - ['testuser'], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await databaseService.getConnection().execute( + 'UPDATE users SET isActive = 0 WHERE username = ?', + ['testuser'] + ); // Try to login const loginData = { @@ -1273,21 +1121,22 @@ describe('Auth Routes - POST /api/auth/login', () => { }); it('should handle database errors gracefully', async () => { - // Create a new database instance that we can close - const tempDb = new Database(':memory:'); - await initializeSchema(tempDb); - - const tempDatabaseService = { - getConnection: () => tempDb, - isInitialized: () => true, - } as DatabaseService; + // Create a temporary database service that we can close + const tempDatabaseService = new DatabaseService(':memory:'); + await tempDatabaseService.initialize(); + + const tempSetupService = new SetupService(tempDatabaseService.getConnection()); + await tempSetupService.saveConfig({ + allowSelfRegistration: true, + defaultNewUserRole: null, + }); const tempApp = express(); tempApp.use(express.json()); tempApp.use('/api/auth', createAuthRouter(tempDatabaseService)); // Close the database to simulate error - await closeDatabase(tempDb); + await tempDatabaseService.close(); const loginData = { username: 'testuser', @@ -1700,12 +1549,7 @@ describe('Auth Routes - POST /api/auth/logout', () => { const token = loginResponse.body.token; // Close the database to simulate error - await new Promise((resolve, 
reject) => { - databaseService.getConnection().close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await databaseService.getConnection().close(); // Logout should fail at middleware level (token verification fails) // because the database is needed to check revocation @@ -1744,16 +1588,10 @@ describe('Auth Routes - POST /api/auth/logout', () => { const token = loginResponse.body.token; // Deactivate the user - await new Promise((resolve, reject) => { - databaseService.getConnection().run( - 'UPDATE users SET isActive = 0 WHERE username = ?', - ['testuser'], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await databaseService.getConnection().execute( + 'UPDATE users SET isActive = 0 WHERE username = ?', + ['testuser'] + ); // Logout should still work (token is still valid, just revoke it) const response = await request(app) @@ -2157,16 +1995,10 @@ describe('Auth Routes - POST /api/auth/refresh', () => { const refreshToken = loginResponse.body.refreshToken; // Deactivate the user - await new Promise((resolve, reject) => { - databaseService.getConnection().run( - 'UPDATE users SET isActive = 0 WHERE username = ?', - ['testuser'], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await databaseService.getConnection().execute( + 'UPDATE users SET isActive = 0 WHERE username = ?', + ['testuser'] + ); // Try to refresh token const response = await request(app) @@ -2290,12 +2122,7 @@ describe('Auth Routes - POST /api/auth/refresh', () => { const refreshToken = loginResponse.body.refreshToken; // Close the database to simulate error - await new Promise((resolve, reject) => { - databaseService.getConnection().close((err) => { - if (err) reject(err); - else resolve(); - }); - }); + await databaseService.getConnection().close(); // Refresh should fail gracefully const response = await request(app) diff --git a/backend/test/routes/aws.test.ts b/backend/test/routes/aws.test.ts new file mode 100644 index 
00000000..bb79f3d1 --- /dev/null +++ b/backend/test/routes/aws.test.ts @@ -0,0 +1,358 @@ +import express, { type Express } from "express"; +import request from "supertest"; +import { describe, it, expect, beforeAll, afterAll, beforeEach, afterEach, vi } from "vitest"; +import { createAWSRouter } from "../../src/routes/integrations/aws"; +import { DatabaseService } from "../../src/database/DatabaseService"; +import { AuthenticationService } from "../../src/services/AuthenticationService"; +import { UserService } from "../../src/services/UserService"; +import { PermissionService } from "../../src/services/PermissionService"; +import { RoleService } from "../../src/services/RoleService"; +import { AWSAuthenticationError } from "../../src/integrations/aws/types"; +import type { AWSPlugin } from "../../src/integrations/aws/AWSPlugin"; + +/** + * Create a mock AWSPlugin with vi.fn() stubs for all methods used by the router + */ +function createMockAWSPlugin(): AWSPlugin { + return { + getInventory: vi.fn().mockResolvedValue([]), + executeAction: vi.fn().mockResolvedValue({ + id: "aws-test-1", + type: "task", + targetNodes: ["new"], + action: "provision", + status: "success", + startedAt: new Date().toISOString(), + completedAt: new Date().toISOString(), + results: [], + }), + getRegions: vi.fn().mockResolvedValue(["us-east-1", "us-west-2"]), + getInstanceTypes: vi.fn().mockResolvedValue([]), + getAMIs: vi.fn().mockResolvedValue([]), + getVPCs: vi.fn().mockResolvedValue([]), + getSubnets: vi.fn().mockResolvedValue([]), + getSecurityGroups: vi.fn().mockResolvedValue([]), + getKeyPairs: vi.fn().mockResolvedValue([]), + } as unknown as AWSPlugin; +} + +describe("AWS Router", () => { + let app: Express; + let databaseService: DatabaseService; + let authService: AuthenticationService; + let userService: UserService; + let permissionService: PermissionService; + let roleService: RoleService; + let mockPlugin: AWSPlugin; + let adminToken: string; + let adminUserId: string; + + 
beforeAll(async () => { + databaseService = new DatabaseService(":memory:"); + await databaseService.initialize(); + + const jwtSecret = "test-secret-key"; // pragma: allowlist secret + process.env.JWT_SECRET = jwtSecret; + authService = new AuthenticationService(databaseService.getConnection(), jwtSecret); + userService = new UserService(databaseService.getConnection(), authService); + permissionService = new PermissionService(databaseService.getConnection()); + roleService = new RoleService(databaseService.getConnection()); + }); + + afterAll(async () => { + await databaseService.close(); + }); + + beforeEach(async () => { + mockPlugin = createMockAWSPlugin(); + + // Create admin user with aws permissions + const adminUser = await userService.createUser({ + username: "aws_admin", + email: "aws_admin@test.com", + password: "AdminPass123!", + firstName: "AWS", + lastName: "Admin", + isAdmin: false, + }); + adminUserId = adminUser.id; + + // Ensure aws permissions exist + const permNames = [ + { resource: "aws", action: "read", description: "Read AWS resources" }, + { resource: "aws", action: "provision", description: "Provision AWS resources" }, + { resource: "aws", action: "lifecycle", description: "AWS lifecycle actions" }, + ]; + + const permIds: string[] = []; + for (const p of permNames) { + try { + const perm = await permissionService.createPermission(p); + permIds.push(perm.id); + } catch { + const all = await permissionService.listPermissions(); + const found = all.items.find( + (x) => x.resource === p.resource && x.action === p.action + ); + if (found) permIds.push(found.id); + } + } + + // Create role with aws permissions + const awsRole = await roleService.createRole({ + name: "AWSAdmin", + description: "Can manage AWS resources", + }); + + for (const pid of permIds) { + await roleService.assignPermissionToRole(awsRole.id, pid); + } + await userService.assignRoleToUser(adminUserId, awsRole.id); + + adminToken = await authService.generateToken(adminUser); 
+ + // Build app with the mock plugin + app = express(); + app.use(express.json()); + app.use("/api/integrations/aws", createAWSRouter(mockPlugin)); + }); + + afterEach(async () => { + const db = databaseService.getConnection(); + await db.execute("DELETE FROM user_roles"); + await db.execute("DELETE FROM role_permissions"); + await db.execute('DELETE FROM users WHERE username = "aws_admin"'); + await db.execute('DELETE FROM roles WHERE name = "AWSAdmin"'); + }); + + describe("GET /api/integrations/aws/inventory", () => { + it("should return inventory from the plugin", async () => { + const mockNodes = [ + { id: "aws:us-east-1:i-abc123", name: "test-instance", uri: "aws:us-east-1:i-abc123" }, + ]; + (mockPlugin.getInventory as ReturnType).mockResolvedValue(mockNodes); + + const response = await request(app).get("/api/integrations/aws/inventory"); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("inventory"); + expect(response.body.inventory).toEqual(mockNodes); + }); + + it("should return 401 when AWS auth fails", async () => { + (mockPlugin.getInventory as ReturnType).mockRejectedValue( + new AWSAuthenticationError("Invalid credentials") + ); + + const response = await request(app).get("/api/integrations/aws/inventory"); + + expect(response.status).toBe(401); + expect(response.body.error.code).toBe("UNAUTHORIZED"); + }); + + it("should return 500 on generic error", async () => { + (mockPlugin.getInventory as ReturnType).mockRejectedValue( + new Error("Something went wrong") + ); + + const response = await request(app).get("/api/integrations/aws/inventory"); + + expect(response.status).toBe(500); + expect(response.body.error.code).toBe("INTERNAL_SERVER_ERROR"); + }); + }); + + describe("POST /api/integrations/aws/provision", () => { + it("should provision an instance with valid params", async () => { + const response = await request(app) + .post("/api/integrations/aws/provision") + .send({ imageId: "ami-12345", instanceType: "t2.micro" 
}); + + expect(response.status).toBe(201); + expect(response.body).toHaveProperty("result"); + expect(response.body.result.status).toBe("success"); + }); + + it("should return 400 when imageId is missing", async () => { + const response = await request(app) + .post("/api/integrations/aws/provision") + .send({ instanceType: "t2.micro" }); + + expect(response.status).toBe(400); + expect(response.body.error.code).toBe("VALIDATION_ERROR"); + }); + + it("should return 401 on AWS auth error", async () => { + (mockPlugin.executeAction as ReturnType).mockRejectedValue( + new AWSAuthenticationError("Expired token") + ); + + const response = await request(app) + .post("/api/integrations/aws/provision") + .send({ imageId: "ami-12345" }); + + expect(response.status).toBe(401); + }); + }); + + describe("POST /api/integrations/aws/lifecycle", () => { + it("should execute a lifecycle action", async () => { + const response = await request(app) + .post("/api/integrations/aws/lifecycle") + .send({ instanceId: "i-abc123", action: "stop" }); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("result"); + }); + + it("should return 400 for invalid action", async () => { + const response = await request(app) + .post("/api/integrations/aws/lifecycle") + .send({ instanceId: "i-abc123", action: "destroy" }); + + expect(response.status).toBe(400); + expect(response.body.error.code).toBe("VALIDATION_ERROR"); + }); + + it("should return 400 when instanceId is missing", async () => { + const response = await request(app) + .post("/api/integrations/aws/lifecycle") + .send({ action: "start" }); + + expect(response.status).toBe(400); + }); + + it("should include region in target when provided", async () => { + await request(app) + .post("/api/integrations/aws/lifecycle") + .send({ instanceId: "i-abc123", action: "reboot", region: "eu-west-1" }); + + expect(mockPlugin.executeAction).toHaveBeenCalledWith( + expect.objectContaining({ + target: "aws:eu-west-1:i-abc123", + 
action: "reboot", + }) + ); + }); + }); + + describe("GET /api/integrations/aws/regions", () => { + it("should return regions", async () => { + const response = await request(app).get("/api/integrations/aws/regions"); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("regions"); + expect(response.body.regions).toEqual(["us-east-1", "us-west-2"]); + }); + }); + + describe("GET /api/integrations/aws/instance-types", () => { + it("should return instance types", async () => { + const response = await request(app).get("/api/integrations/aws/instance-types"); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("instanceTypes"); + }); + + it("should pass region query param to plugin", async () => { + await request(app) + .get("/api/integrations/aws/instance-types") + .query({ region: "eu-west-1" }); + + expect(mockPlugin.getInstanceTypes).toHaveBeenCalledWith("eu-west-1"); + }); + }); + + describe("GET /api/integrations/aws/amis", () => { + it("should return AMIs for a region", async () => { + const response = await request(app) + .get("/api/integrations/aws/amis") + .query({ region: "us-east-1" }); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("amis"); + }); + + it("should return 400 when region is missing", async () => { + const response = await request(app).get("/api/integrations/aws/amis"); + + expect(response.status).toBe(400); + expect(response.body.error.code).toBe("VALIDATION_ERROR"); + }); + }); + + describe("GET /api/integrations/aws/vpcs", () => { + it("should return VPCs for a region", async () => { + const response = await request(app) + .get("/api/integrations/aws/vpcs") + .query({ region: "us-east-1" }); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("vpcs"); + }); + + it("should return 400 when region is missing", async () => { + const response = await request(app).get("/api/integrations/aws/vpcs"); + expect(response.status).toBe(400); + }); 
+ }); + + describe("GET /api/integrations/aws/subnets", () => { + it("should return subnets for a region", async () => { + const response = await request(app) + .get("/api/integrations/aws/subnets") + .query({ region: "us-east-1" }); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("subnets"); + }); + + it("should pass vpcId filter when provided", async () => { + await request(app) + .get("/api/integrations/aws/subnets") + .query({ region: "us-east-1", vpcId: "vpc-123" }); + + expect(mockPlugin.getSubnets).toHaveBeenCalledWith("us-east-1", "vpc-123"); + }); + + it("should return 400 when region is missing", async () => { + const response = await request(app).get("/api/integrations/aws/subnets"); + expect(response.status).toBe(400); + }); + }); + + describe("GET /api/integrations/aws/security-groups", () => { + it("should return security groups for a region", async () => { + const response = await request(app) + .get("/api/integrations/aws/security-groups") + .query({ region: "us-east-1" }); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("securityGroups"); + }); + + it("should pass vpcId filter when provided", async () => { + await request(app) + .get("/api/integrations/aws/security-groups") + .query({ region: "us-east-1", vpcId: "vpc-456" }); + + expect(mockPlugin.getSecurityGroups).toHaveBeenCalledWith("us-east-1", "vpc-456"); + }); + }); + + describe("GET /api/integrations/aws/key-pairs", () => { + it("should return key pairs for a region", async () => { + const response = await request(app) + .get("/api/integrations/aws/key-pairs") + .query({ region: "us-east-1" }); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("keyPairs"); + }); + + it("should return 400 when region is missing", async () => { + const response = await request(app).get("/api/integrations/aws/key-pairs"); + expect(response.status).toBe(400); + }); + }); +}); diff --git a/backend/test/routes/groups.test.ts 
b/backend/test/routes/groups.test.ts index 0b53d7c0..1273172a 100644 --- a/backend/test/routes/groups.test.ts +++ b/backend/test/routes/groups.test.ts @@ -105,22 +105,13 @@ describe('Groups Router - POST /api/groups', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM groups; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM groups'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); }); describe('Authentication and Authorization', () => { @@ -303,22 +294,13 @@ describe('Groups Router - GET /api/groups', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM groups; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM groups'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); }); describe('Authentication and Authorization', () => { @@ -477,24 +459,15 @@ describe('Groups Router - GET /api/groups/:id', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - 
databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM groups; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM groups'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); }); describe('Success Cases', () => { @@ -602,22 +575,13 @@ describe('Groups Router - PUT /api/groups/:id', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM groups; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM groups'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); }); describe('Success Cases', () => { @@ -760,22 +724,13 @@ describe('Groups Router - DELETE /api/groups/:id', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM groups; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - `, - (err) => { 
- if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM groups'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); }); describe('Authentication and Authorization', () => { @@ -923,24 +878,15 @@ describe('Groups Router - POST /api/groups/:id/roles/:roleId', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM groups; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM groups'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); }); describe('Authentication and Authorization', () => { @@ -1152,24 +1098,15 @@ describe('Groups Router - DELETE /api/groups/:id/roles/:roleId', () => { }); afterEach(async () => { - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM groups; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + 
await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM groups'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); }); describe('Authentication and Authorization', () => { diff --git a/backend/test/routes/journal.test.ts b/backend/test/routes/journal.test.ts new file mode 100644 index 00000000..78746477 --- /dev/null +++ b/backend/test/routes/journal.test.ts @@ -0,0 +1,325 @@ +import express, { Express } from "express"; +import request from "supertest"; +import { createJournalRouter } from "../../src/routes/journal"; +import { DatabaseService } from "../../src/database/DatabaseService"; +import { AuthenticationService } from "../../src/services/AuthenticationService"; +import { UserService } from "../../src/services/UserService"; +import { PermissionService } from "../../src/services/PermissionService"; +import { RoleService } from "../../src/services/RoleService"; +import { JournalService } from "../../src/services/journal/JournalService"; + +describe("Journal Router", () => { + let app: Express; + let databaseService: DatabaseService; + let authService: AuthenticationService; + let userService: UserService; + let permissionService: PermissionService; + let roleService: RoleService; + let journalService: JournalService; + let adminToken: string; + let adminUserId: string; + let regularUserToken: string; + + beforeAll(async () => { + databaseService = new DatabaseService(":memory:"); + await databaseService.initialize(); + + const jwtSecret = "test-secret-key"; // pragma: allowlist secret + process.env.JWT_SECRET = jwtSecret; + authService = new AuthenticationService(databaseService.getConnection(), jwtSecret); + userService = new UserService(databaseService.getConnection(), authService); + 
permissionService = new PermissionService(databaseService.getConnection()); + roleService = new RoleService(databaseService.getConnection()); + journalService = new JournalService(databaseService.getConnection()); + + app = express(); + app.use(express.json()); + app.use("/api/journal", createJournalRouter(databaseService)); + }); + + afterAll(async () => { + await databaseService.close(); + }); + + beforeEach(async () => { + // Create admin user with journal permissions + const adminUser = await userService.createUser({ + username: "journal_admin", + email: "journal_admin@test.com", + password: "AdminPass123!", + firstName: "Journal", + lastName: "Admin", + isAdmin: false, + }); + adminUserId = adminUser.id; + + // Ensure journal:read and journal:note permissions exist + let journalReadPerm; + let journalNotePerm; + try { + journalReadPerm = await permissionService.createPermission({ + resource: "journal", + action: "read", + description: "Read journal entries", + }); + } catch { + const all = await permissionService.listPermissions(); + journalReadPerm = all.items.find( + (p) => p.resource === "journal" && p.action === "read" + ); + } + try { + journalNotePerm = await permissionService.createPermission({ + resource: "journal", + action: "note", + description: "Add journal notes", + }); + } catch { + const all = await permissionService.listPermissions(); + journalNotePerm = all.items.find( + (p) => p.resource === "journal" && p.action === "note" + ); + } + + // Create role with journal permissions + const journalRole = await roleService.createRole({ + name: "JournalAdmin", + description: "Can read and write journal", + }); + + if (journalReadPerm) { + await roleService.assignPermissionToRole(journalRole.id, journalReadPerm.id); + } + if (journalNotePerm) { + await roleService.assignPermissionToRole(journalRole.id, journalNotePerm.id); + } + await userService.assignRoleToUser(adminUserId, journalRole.id); + + adminToken = await authService.generateToken(adminUser); 
+ + // Create regular user without journal permissions + const regularUser = await userService.createUser({ + username: "journal_regular", + email: "journal_regular@test.com", + password: "RegularPass123!", + firstName: "Regular", + lastName: "User", + isAdmin: false, + }); + + regularUserToken = await authService.generateToken(regularUser); + + // Seed some journal entries for testing + await journalService.recordEvent({ + nodeId: "node-1", + nodeUri: "proxmox:node-1", + eventType: "provision", + source: "proxmox", + action: "create_vm", + summary: "Provisioned VM node-1", + userId: adminUserId, + }); + await journalService.recordEvent({ + nodeId: "node-1", + nodeUri: "proxmox:node-1", + eventType: "start", + source: "proxmox", + action: "start_vm", + summary: "Started VM node-1", + }); + await journalService.recordEvent({ + nodeId: "node-2", + nodeUri: "aws:node-2", + eventType: "provision", + source: "aws", + action: "run_instances", + summary: "Launched EC2 instance node-2", + }); + }); + + afterEach(async () => { + const db = databaseService.getConnection(); + await db.execute("DELETE FROM user_roles"); + await db.execute("DELETE FROM role_permissions"); + await db.execute('DELETE FROM users WHERE username IN ("journal_admin", "journal_regular")'); + await db.execute('DELETE FROM roles WHERE name = "JournalAdmin"'); + await db.execute("DELETE FROM journal_entries"); + }); + + describe("GET /api/journal/:nodeId", () => { + it("should return timeline entries for a node", async () => { + const response = await request(app) + .get("/api/journal/node-1") + .set("Authorization", `Bearer ${adminToken}`); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("entries"); + expect(Array.isArray(response.body.entries)).toBe(true); + expect(response.body.entries.length).toBe(2); + }); + + it("should return 401 when not authenticated", async () => { + const response = await request(app).get("/api/journal/node-1"); + 
expect(response.status).toBe(401); + }); + + it("should return 403 when user lacks journal:read permission", async () => { + const response = await request(app) + .get("/api/journal/node-1") + .set("Authorization", `Bearer ${regularUserToken}`); + expect(response.status).toBe(403); + }); + + it("should return empty entries for unknown node", async () => { + const response = await request(app) + .get("/api/journal/unknown-node") + .set("Authorization", `Bearer ${adminToken}`); + + expect(response.status).toBe(200); + expect(response.body.entries).toEqual([]); + }); + + it("should support pagination via limit and offset", async () => { + const response = await request(app) + .get("/api/journal/node-1") + .set("Authorization", `Bearer ${adminToken}`) + .query({ limit: 1, offset: 0 }); + + expect(response.status).toBe(200); + expect(response.body.entries.length).toBe(1); + }); + + it("should return 400 for invalid limit", async () => { + const response = await request(app) + .get("/api/journal/node-1") + .set("Authorization", `Bearer ${adminToken}`) + .query({ limit: 0 }); + + expect(response.status).toBe(400); + expect(response.body.error.code).toBe("VALIDATION_ERROR"); + }); + }); + + describe("POST /api/journal/:nodeId/notes", () => { + it("should add a manual note to a node", async () => { + const response = await request(app) + .post("/api/journal/node-1/notes") + .set("Authorization", `Bearer ${adminToken}`) + .send({ content: "This is a test note" }); + + expect(response.status).toBe(201); + expect(response.body).toHaveProperty("id"); + }); + + it("should return 401 when not authenticated", async () => { + const response = await request(app) + .post("/api/journal/node-1/notes") + .send({ content: "Unauthorized note" }); + expect(response.status).toBe(401); + }); + + it("should return 403 when user lacks journal:note permission", async () => { + const response = await request(app) + .post("/api/journal/node-1/notes") + .set("Authorization", `Bearer 
${regularUserToken}`) + .send({ content: "Forbidden note" }); + expect(response.status).toBe(403); + }); + + it("should return 400 when content is missing", async () => { + const response = await request(app) + .post("/api/journal/node-1/notes") + .set("Authorization", `Bearer ${adminToken}`) + .send({}); + + expect(response.status).toBe(400); + expect(response.body.error.code).toBe("VALIDATION_ERROR"); + }); + + it("should return 400 when content is empty string", async () => { + const response = await request(app) + .post("/api/journal/node-1/notes") + .set("Authorization", `Bearer ${adminToken}`) + .send({ content: "" }); + + expect(response.status).toBe(400); + expect(response.body.error.code).toBe("VALIDATION_ERROR"); + }); + + it("should persist the note and appear in timeline", async () => { + await request(app) + .post("/api/journal/node-1/notes") + .set("Authorization", `Bearer ${adminToken}`) + .send({ content: "Persisted note" }); + + const timeline = await request(app) + .get("/api/journal/node-1") + .set("Authorization", `Bearer ${adminToken}`); + + expect(timeline.status).toBe(200); + const noteEntry = timeline.body.entries.find( + (e: { summary: string }) => e.summary === "Persisted note" + ); + expect(noteEntry).toBeDefined(); + expect(noteEntry.eventType).toBe("note"); + expect(noteEntry.source).toBe("user"); + }); + }); + + describe("GET /api/journal/search", () => { + it("should search journal entries by query", async () => { + const response = await request(app) + .get("/api/journal/search") + .set("Authorization", `Bearer ${adminToken}`) + .query({ q: "Provisioned" }); + + expect(response.status).toBe(200); + expect(response.body).toHaveProperty("entries"); + expect(response.body.entries.length).toBeGreaterThanOrEqual(1); + }); + + it("should return 401 when not authenticated", async () => { + const response = await request(app) + .get("/api/journal/search") + .query({ q: "test" }); + expect(response.status).toBe(401); + }); + + it("should 
return 403 when user lacks journal:read permission", async () => { + const response = await request(app) + .get("/api/journal/search") + .set("Authorization", `Bearer ${regularUserToken}`) + .query({ q: "test" }); + expect(response.status).toBe(403); + }); + + it("should return 400 when query parameter q is missing", async () => { + const response = await request(app) + .get("/api/journal/search") + .set("Authorization", `Bearer ${adminToken}`); + + expect(response.status).toBe(400); + expect(response.body.error.code).toBe("VALIDATION_ERROR"); + }); + + it("should return empty results for non-matching query", async () => { + const response = await request(app) + .get("/api/journal/search") + .set("Authorization", `Bearer ${adminToken}`) + .query({ q: "nonexistent_xyz_query" }); + + expect(response.status).toBe(200); + expect(response.body.entries).toEqual([]); + }); + + it("should support pagination in search", async () => { + const response = await request(app) + .get("/api/journal/search") + .set("Authorization", `Bearer ${adminToken}`) + .query({ q: "node", limit: 1, offset: 0 }); + + expect(response.status).toBe(200); + expect(response.body.entries.length).toBeLessThanOrEqual(1); + }); + }); +}); diff --git a/backend/test/routes/permissions.test.ts b/backend/test/routes/permissions.test.ts index b756b1aa..11042f7f 100644 --- a/backend/test/routes/permissions.test.ts +++ b/backend/test/routes/permissions.test.ts @@ -112,11 +112,11 @@ describe('Permissions Router', () => { afterEach(async () => { // Clean up test data const db = databaseService.getConnection(); - db.exec('DELETE FROM user_roles'); - db.exec('DELETE FROM role_permissions'); - db.exec('DELETE FROM users WHERE username IN ("admin_user", "regular_user")'); - db.exec('DELETE FROM roles WHERE name = "PermissionAdmin"'); - db.exec('DELETE FROM permissions WHERE resource NOT IN ("users", "groups", "roles", "permissions", "ansible", "bolt", "puppetdb")'); + await db.execute('DELETE FROM user_roles'); + 
await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM users WHERE username IN ("admin_user", "regular_user")'); + await db.execute('DELETE FROM roles WHERE name = "PermissionAdmin"'); + await db.execute('DELETE FROM permissions WHERE resource NOT IN ("users", "groups", "roles", "permissions", "ansible", "bolt", "puppetdb")'); }); describe('POST /api/permissions', () => { diff --git a/backend/test/routes/roles-permissions.test.ts b/backend/test/routes/roles-permissions.test.ts index 024aa5b0..cbb0bfdd 100644 --- a/backend/test/routes/roles-permissions.test.ts +++ b/backend/test/routes/roles-permissions.test.ts @@ -120,36 +120,11 @@ describe('Roles Router - Role-Permission Association Routes', () => { afterEach(async () => { // Clean up database after each test const db = databaseService.getConnection(); - await new Promise((resolve, reject) => { - db.run('DELETE FROM user_roles', (err) => { - if (err) reject(err); - else resolve(); - }); - }); - await new Promise((resolve, reject) => { - db.run('DELETE FROM role_permissions', (err) => { - if (err) reject(err); - else resolve(); - }); - }); - await new Promise((resolve, reject) => { - db.run('DELETE FROM roles WHERE isBuiltIn = 0', (err) => { - if (err) reject(err); - else resolve(); - }); - }); - await new Promise((resolve, reject) => { - db.run('DELETE FROM permissions WHERE resource NOT IN (\'ansible\', \'bolt\', \'puppetdb\', \'users\', \'groups\', \'roles\')', (err) => { - if (err) reject(err); - else resolve(); - }); - }); - await new Promise((resolve, reject) => { - db.run('DELETE FROM users', (err) => { - if (err) reject(err); - else resolve(); - }); - }); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute("DELETE FROM permissions WHERE resource NOT IN ('ansible', 'bolt', 'puppetdb', 'users', 'groups', 'roles')"); + await db.execute('DELETE FROM users'); 
}); describe('POST /api/roles/:id/permissions/:permissionId', () => { diff --git a/backend/test/routes/users.test.ts b/backend/test/routes/users.test.ts index 7cc219ae..48594f05 100644 --- a/backend/test/routes/users.test.ts +++ b/backend/test/routes/users.test.ts @@ -12,15 +12,9 @@ import { GroupService } from '../../src/services/GroupService'; // Helper function to disable default role assignment in tests async function disableDefaultRoleAssignment(databaseService: DatabaseService): Promise { - await new Promise((resolve, reject) => { - databaseService.getConnection().run( - `INSERT OR REPLACE INTO config (key, value, updatedAt) VALUES ('default_new_user_role', '', datetime('now'))`, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await databaseService.getConnection().execute( + "INSERT OR REPLACE INTO config (key, value, updatedAt) VALUES ('default_new_user_role', '', datetime('now'))" + ); } describe('Users Router - GET /api/users', () => { @@ -44,15 +38,9 @@ describe('Users Router - GET /api/users', () => { await disableDefaultRoleAssignment(databaseService); // Disable default role assignment for all tests - await new Promise((resolve, reject) => { - databaseService.getConnection().run( - `INSERT OR REPLACE INTO config (key, value, updatedAt) VALUES ('default_new_user_role', '', datetime('now'))`, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await databaseService.getConnection().execute( + "INSERT OR REPLACE INTO config (key, value, updatedAt) VALUES ('default_new_user_role', '', datetime('now'))" + ); // Initialize services const jwtSecret = 'test-secret-key'; // pragma: allowlist secret @@ -133,21 +121,12 @@ describe('Users Router - GET /api/users', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM users; - DELETE FROM roles WHERE 
isBuiltIn = 0; - DELETE FROM permissions; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); }); describe('Authentication and Authorization', () => { @@ -492,24 +471,15 @@ describe('Users Router - GET /api/users/:id', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - DELETE FROM groups; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); + await db.execute('DELETE FROM groups'); }); describe('Authentication and Authorization', () => { @@ -615,16 +585,10 @@ describe('Users Router - GET /api/users/:id', () => { // Create a group const db = databaseService.getConnection(); const groupId = randomUUID(); - await new Promise((resolve, reject) => { - db.run( - 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', - [groupId, 'Test Group', 'Test group description', new Date().toISOString(), new Date().toISOString()], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + 'INSERT INTO 
groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', + [groupId, 'Test Group', 'Test group description', new Date().toISOString(), new Date().toISOString()] + ); // Add user to group await userService.addUserToGroup(testUserId, groupId); @@ -671,27 +635,15 @@ describe('Users Router - GET /api/users/:id', () => { const group1Id = randomUUID(); const group2Id = randomUUID(); - await new Promise((resolve, reject) => { - db.run( - 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', - [group1Id, 'Group 1', 'First group', new Date().toISOString(), new Date().toISOString()], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', + [group1Id, 'Group 1', 'First group', new Date().toISOString(), new Date().toISOString()] + ); - await new Promise((resolve, reject) => { - db.run( - 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', - [group2Id, 'Group 2', 'Second group', new Date().toISOString(), new Date().toISOString()], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', + [group2Id, 'Group 2', 'Second group', new Date().toISOString(), new Date().toISOString()] + ); // Add user to groups await userService.addUserToGroup(testUserId, group1Id); @@ -866,24 +818,15 @@ describe('Users Router - PUT /api/users/:id', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - DELETE FROM groups; - `, - (err) => { - if (err) 
reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); + await db.execute('DELETE FROM groups'); }); describe('Authentication and Authorization', () => { @@ -1466,24 +1409,15 @@ describe('Users Router - DELETE /api/users/:id', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - DELETE FROM groups; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); + await db.execute('DELETE FROM groups'); }); describe('Authentication and Authorization', () => { @@ -1650,16 +1584,10 @@ describe('Users Router - DELETE /api/users/:id', () => { // Create a group and add user to it const db = databaseService.getConnection(); const groupId = randomUUID(); - await new Promise((resolve, reject) => { - db.run( - 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', - [groupId, 'Test Group', 'Test group description', new Date().toISOString(), new Date().toISOString()], - (err) => 
{ - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', + [groupId, 'Test Group', 'Test group description', new Date().toISOString(), new Date().toISOString()] + ); await userService.addUserToGroup(testUserId, groupId); @@ -1696,16 +1624,10 @@ describe('Users Router - DELETE /api/users/:id', () => { // Create a group const db = databaseService.getConnection(); const groupId = randomUUID(); - await new Promise((resolve, reject) => { - db.run( - 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', - [groupId, 'Test Group', 'Test group description', new Date().toISOString(), new Date().toISOString()], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + 'INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)', + [groupId, 'Test Group', 'Test group description', new Date().toISOString(), new Date().toISOString()] + ); await userService.addUserToGroup(testUserId, groupId); @@ -1878,24 +1800,15 @@ describe('Users Router - POST /api/users/:id/groups/:groupId', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - DELETE FROM groups; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await 
db.execute('DELETE FROM permissions'); + await db.execute('DELETE FROM groups'); }); describe('Authentication and Authorization', () => { @@ -2302,24 +2215,15 @@ describe('Users Router - DELETE /api/users/:id/groups/:groupId', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - DELETE FROM groups; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); + await db.execute('DELETE FROM groups'); }); describe('Authentication and Authorization', () => { @@ -2763,24 +2667,15 @@ describe('Users Router - POST /api/users/:id/roles/:roleId', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - DELETE FROM groups; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM users'); + await 
db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); + await db.execute('DELETE FROM groups'); }); describe('Authentication and Authorization', () => { @@ -3239,24 +3134,15 @@ describe('Users Router - DELETE /api/users/:id/roles/:roleId', () => { afterEach(async () => { // Clean up database after each test - await new Promise((resolve, reject) => { - databaseService.getConnection().exec( - ` - DELETE FROM user_roles; - DELETE FROM role_permissions; - DELETE FROM user_groups; - DELETE FROM group_roles; - DELETE FROM users; - DELETE FROM roles WHERE isBuiltIn = 0; - DELETE FROM permissions; - DELETE FROM groups; - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + const db = databaseService.getConnection(); + await db.execute('DELETE FROM user_roles'); + await db.execute('DELETE FROM role_permissions'); + await db.execute('DELETE FROM user_groups'); + await db.execute('DELETE FROM group_roles'); + await db.execute('DELETE FROM users'); + await db.execute("DELETE FROM roles WHERE isBuiltIn = 0"); + await db.execute('DELETE FROM permissions'); + await db.execute('DELETE FROM groups'); }); describe('Authentication and Authorization', () => { diff --git a/backend/test/services/AuditLoggingService.test.ts b/backend/test/services/AuditLoggingService.test.ts index a415a51f..ed8512ca 100644 --- a/backend/test/services/AuditLoggingService.test.ts +++ b/backend/test/services/AuditLoggingService.test.ts @@ -1,38 +1,65 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import sqlite3 from 'sqlite3'; +import { SQLiteAdapter } from '../../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../../src/database/DatabaseAdapter'; import { AuditLoggingService, AuditEventType, AuditAction, AuditResult } from '../../src/services/AuditLoggingService'; import { readFileSync } from 'fs'; import { join } from 'path'; describe('AuditLoggingService', () => { - let db: sqlite3.Database; 
+ let db: DatabaseAdapter; let auditLogger: AuditLoggingService; beforeEach(async () => { - // Create in-memory database - db = new sqlite3.Database(':memory:'); - - // Load and execute audit schema + // Create in-memory database via SQLiteAdapter + db = new SQLiteAdapter(':memory:'); + await db.initialize(); + + // Create users table first (required by audit_logs foreign keys) + await db.execute(` + CREATE TABLE IF NOT EXISTS users ( + id TEXT PRIMARY KEY, + username TEXT NOT NULL UNIQUE, + email TEXT NOT NULL UNIQUE, + passwordHash TEXT NOT NULL, + firstName TEXT NOT NULL, + lastName TEXT NOT NULL, + isActive INTEGER NOT NULL DEFAULT 1, + isAdmin INTEGER NOT NULL DEFAULT 0, + createdAt TEXT NOT NULL, + updatedAt TEXT NOT NULL, + lastLoginAt TEXT + ) + `); + + // Load and execute audit schema (split multi-statement SQL) const schemaPath = join(__dirname, '../../src/database/migrations/004_audit_logging.sql'); const schema = readFileSync(schemaPath, 'utf-8'); - await new Promise((resolve, reject) => { - db.exec(schema, (err) => { - if (err) reject(err); - else resolve(); - }); - }); + // Split by semicolons and execute each statement + const statements = schema + .split(';') + .map(s => s.trim()) + .filter(s => s.length > 0); + + for (const stmt of statements) { + await db.execute(stmt); + } + + // Insert test users referenced by audit log entries (foreign key constraint) + const testUsers = ['user-1', 'user-2', 'user-123', 'user-456', 'admin-1', 'admin-123']; + for (const userId of testUsers) { + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + VALUES (?, ?, ?, ?, ?, ?, 1, 0, datetime('now'), datetime('now'))`, + [userId, `user-${userId}`, `${userId}@test.com`, 'hash', 'Test', 'User'] + ); + } auditLogger = new AuditLoggingService(db); }); afterEach(async () => { - await new Promise((resolve, reject) => { - db.close((err) => { - if (err) reject(err); - else resolve(); - }); - 
}); + await db.close(); }); describe('logAuthenticationAttempt', () => { diff --git a/backend/test/services/AuthenticationService.bruteforce.test.ts b/backend/test/services/AuthenticationService.bruteforce.test.ts index 6b3f09b9..a79cb77e 100644 --- a/backend/test/services/AuthenticationService.bruteforce.test.ts +++ b/backend/test/services/AuthenticationService.bruteforce.test.ts @@ -1,10 +1,11 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import sqlite3 from 'sqlite3'; +import { SQLiteAdapter } from '../../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../../src/database/DatabaseAdapter'; import { AuthenticationService } from '../../src/services/AuthenticationService'; import { randomUUID } from 'crypto'; describe('AuthenticationService - Brute Force Protection', () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let authService: AuthenticationService; const testUsername = 'testuser'; // pragma: allowlist secret const testPassword = 'TestPass123!'; // pragma: allowlist secret @@ -12,7 +13,8 @@ describe('AuthenticationService - Brute Force Protection', () => { beforeEach(async () => { // Create in-memory database - db = new sqlite3.Database(':memory:'); + db = new SQLiteAdapter(':memory:'); + await db.initialize(); // Initialize schema await runQuery(db, ` @@ -127,9 +129,7 @@ describe('AuthenticationService - Brute Force Protection', () => { }); afterEach(async () => { - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); }); it('should allow authentication with correct credentials', async () => { @@ -304,11 +304,6 @@ describe('AuthenticationService - Brute Force Protection', () => { }); // Helper function to run queries -function runQuery(db: sqlite3.Database, sql: string, params: any[] = []): Promise { - return new Promise((resolve, reject) => { - db.run(sql, params, (err) => { - if (err) reject(err); - else resolve(); - }); - }); +async function runQuery(db: 
DatabaseAdapter, sql: string, params: any[] = []): Promise { + await db.execute(sql, params); } diff --git a/backend/test/services/BatchExecutionService.test.ts b/backend/test/services/BatchExecutionService.test.ts index 08bb8b66..cd710a25 100644 --- a/backend/test/services/BatchExecutionService.test.ts +++ b/backend/test/services/BatchExecutionService.test.ts @@ -1,67 +1,117 @@ -import { describe, it, expect, beforeEach, vi } from "vitest"; -import sqlite3 from "sqlite3"; +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; +import { SQLiteAdapter } from "../../src/database/SQLiteAdapter"; +import type { DatabaseAdapter } from "../../src/database/DatabaseAdapter"; import { BatchExecutionService } from "../../src/services/BatchExecutionService"; import type { ExecutionQueue } from "../../src/services/ExecutionQueue"; import type { ExecutionRepository } from "../../src/database/ExecutionRepository"; import type { IntegrationManager } from "../../src/integrations/IntegrationManager"; +async function createSchema(db: DatabaseAdapter): Promise { + await db.execute(` + CREATE TABLE batch_executions ( + id TEXT PRIMARY KEY, + type TEXT NOT NULL, + action TEXT NOT NULL, + parameters TEXT, + target_nodes TEXT NOT NULL, + target_groups TEXT NOT NULL, + status TEXT NOT NULL, + created_at TEXT NOT NULL, + started_at TEXT, + completed_at TEXT, + user_id TEXT NOT NULL, + execution_ids TEXT NOT NULL, + stats_total INTEGER NOT NULL, + stats_queued INTEGER NOT NULL, + stats_running INTEGER NOT NULL, + stats_success INTEGER NOT NULL, + stats_failed INTEGER NOT NULL + ) + `); + await db.execute(` + CREATE TABLE executions ( + id TEXT PRIMARY KEY, + type TEXT NOT NULL, + target_nodes TEXT NOT NULL, + action TEXT NOT NULL, + parameters TEXT, + status TEXT NOT NULL, + started_at TEXT NOT NULL, + completed_at TEXT, + results TEXT NOT NULL, + batch_id TEXT, + batch_position INTEGER, + error TEXT + ) + `); +} + +async function insertBatch(db: DatabaseAdapter, 
batchId: string, overrides: Record = {}): Promise { + const defaults = { + type: "command", + action: "uptime", + parameters: null, + target_nodes: JSON.stringify(["node1", "node2", "node3"]), + target_groups: JSON.stringify([]), + status: "running", + created_at: "2024-01-01T10:00:00Z", + started_at: "2024-01-01T10:00:00Z", + user_id: "user1", + execution_ids: JSON.stringify(["exec1", "exec2", "exec3"]), + stats_total: 3, + stats_queued: 0, + stats_running: 1, + stats_success: 1, + stats_failed: 1, + ...overrides, + }; + await db.execute( + `INSERT INTO batch_executions ( + id, type, action, parameters, target_nodes, target_groups, + status, created_at, started_at, user_id, execution_ids, + stats_total, stats_queued, stats_running, stats_success, stats_failed + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + [ + batchId, defaults.type, defaults.action, defaults.parameters, + defaults.target_nodes, defaults.target_groups, defaults.status, + defaults.created_at, defaults.started_at, defaults.user_id, + defaults.execution_ids, defaults.stats_total, defaults.stats_queued, + defaults.stats_running, defaults.stats_success, defaults.stats_failed, + ] + ); +} + +async function insertExecution( + db: DatabaseAdapter, + exec: { id: string; nodeId: string; status: string; startedAt: string; completedAt: string | null; results: string }, + batchId: string, + position: number +): Promise { + await db.execute( + `INSERT INTO executions ( + id, type, target_nodes, action, parameters, status, + started_at, completed_at, results, batch_id, batch_position + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + [ + exec.id, "command", JSON.stringify([exec.nodeId]), "uptime", null, + exec.status, exec.startedAt, exec.completedAt, exec.results, + batchId, position, + ] + ); +} + describe("BatchExecutionService - getBatchStatus", () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let service: BatchExecutionService; let mockExecutionQueue: ExecutionQueue; let 
mockExecutionRepository: ExecutionRepository; let mockIntegrationManager: IntegrationManager; beforeEach(async () => { - // Create in-memory database - db = new sqlite3.Database(":memory:"); - - // Create tables - await new Promise((resolve, reject) => { - db.exec( - ` - CREATE TABLE batch_executions ( - id TEXT PRIMARY KEY, - type TEXT NOT NULL, - action TEXT NOT NULL, - parameters TEXT, - target_nodes TEXT NOT NULL, - target_groups TEXT NOT NULL, - status TEXT NOT NULL, - created_at TEXT NOT NULL, - started_at TEXT, - completed_at TEXT, - user_id TEXT NOT NULL, - execution_ids TEXT NOT NULL, - stats_total INTEGER NOT NULL, - stats_queued INTEGER NOT NULL, - stats_running INTEGER NOT NULL, - stats_success INTEGER NOT NULL, - stats_failed INTEGER NOT NULL - ); - - CREATE TABLE executions ( - id TEXT PRIMARY KEY, - type TEXT NOT NULL, - target_nodes TEXT NOT NULL, - action TEXT NOT NULL, - parameters TEXT, - status TEXT NOT NULL, - started_at TEXT NOT NULL, - completed_at TEXT, - results TEXT NOT NULL, - batch_id TEXT, - batch_position INTEGER - ); - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + db = new SQLiteAdapter(":memory:"); + await db.initialize(); + await createSchema(db); - // Mock dependencies mockExecutionQueue = {} as ExecutionQueue; mockExecutionRepository = {} as ExecutionRepository; mockIntegrationManager = { @@ -75,138 +125,40 @@ describe("BatchExecutionService - getBatchStatus", () => { }), } as unknown as IntegrationManager; - service = new BatchExecutionService( - db, - mockExecutionQueue, - mockExecutionRepository, - mockIntegrationManager - ); + service = new BatchExecutionService(db, mockExecutionQueue, mockExecutionRepository, mockIntegrationManager); + }); + + afterEach(async () => { + await db.close(); }); it("should fetch batch status with aggregated statistics", async () => { - // Insert test batch const batchId = "batch-123"; - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO batch_executions 
( - id, type, action, parameters, target_nodes, target_groups, - status, created_at, started_at, user_id, execution_ids, - stats_total, stats_queued, stats_running, stats_success, stats_failed - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - batchId, - "command", - "uptime", - null, - JSON.stringify(["node1", "node2", "node3"]), - JSON.stringify([]), - "running", - "2024-01-01T10:00:00Z", - "2024-01-01T10:00:00Z", - "user1", - JSON.stringify(["exec1", "exec2", "exec3"]), - 3, - 0, - 1, - 1, - 1, - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await insertBatch(db, batchId); - // Insert test executions const executions = [ - { - id: "exec1", - nodeId: "node1", - status: "success", - startedAt: "2024-01-01T10:00:00Z", - completedAt: "2024-01-01T10:00:05Z", - results: JSON.stringify([ - { - nodeId: "node1", - status: "success", - output: { exitCode: 0, stdout: "up 5 days", stderr: "" }, - duration: 5000, - }, - ]), - }, - { - id: "exec2", - nodeId: "node2", - status: "failed", - startedAt: "2024-01-01T10:00:00Z", - completedAt: "2024-01-01T10:00:03Z", - results: JSON.stringify([ - { - nodeId: "node2", - status: "failed", - output: { exitCode: 1, stdout: "", stderr: "Connection refused" }, - duration: 3000, - }, - ]), - }, - { - id: "exec3", - nodeId: "node3", - status: "running", - startedAt: "2024-01-01T10:00:00Z", - completedAt: null, - results: JSON.stringify([]), - }, + { id: "exec1", nodeId: "node1", status: "success", startedAt: "2024-01-01T10:00:00Z", completedAt: "2024-01-01T10:00:05Z", + results: JSON.stringify([{ nodeId: "node1", status: "success", output: { exitCode: 0, stdout: "up 5 days", stderr: "" }, duration: 5000 }]) }, + { id: "exec2", nodeId: "node2", status: "failed", startedAt: "2024-01-01T10:00:00Z", completedAt: "2024-01-01T10:00:03Z", + results: JSON.stringify([{ nodeId: "node2", status: "failed", output: { exitCode: 1, stdout: "", stderr: "Connection refused" }, duration: 3000 }]) }, + { id: 
"exec3", nodeId: "node3", status: "running", startedAt: "2024-01-01T10:00:00Z", completedAt: null, results: JSON.stringify([]) }, ]; - for (const exec of executions) { - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO executions ( - id, type, target_nodes, action, parameters, status, - started_at, completed_at, results, batch_id, batch_position - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - exec.id, - "command", - JSON.stringify([exec.nodeId]), - "uptime", - null, - exec.status, - exec.startedAt, - exec.completedAt, - exec.results, - batchId, - executions.indexOf(exec), - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + for (let i = 0; i < executions.length; i++) { + await insertExecution(db, executions[i], batchId, i); } - // Test getBatchStatus const result = await service.getBatchStatus(batchId); - // Verify batch details expect(result.batch.id).toBe(batchId); expect(result.batch.type).toBe("command"); expect(result.batch.action).toBe("uptime"); expect(result.batch.targetNodes).toEqual(["node1", "node2", "node3"]); - - // Verify statistics expect(result.batch.stats.total).toBe(3); expect(result.batch.stats.success).toBe(1); expect(result.batch.stats.failed).toBe(1); expect(result.batch.stats.running).toBe(1); - - // Verify progress - expect(result.progress).toBe(67); // 2 completed out of 3 = 66.67% rounded to 67 - - // Verify executions + expect(result.progress).toBe(67); expect(result.executions).toHaveLength(3); expect(result.executions[0].nodeId).toBe("node1"); expect(result.executions[0].nodeName).toBe("server1.example.com"); @@ -217,312 +169,127 @@ describe("BatchExecutionService - getBatchStatus", () => { }); it("should throw error when batch does not exist", async () => { - await expect(service.getBatchStatus("nonexistent")).rejects.toThrow( - "Batch execution nonexistent not found" - ); + await expect(service.getBatchStatus("nonexistent")).rejects.toThrow("Batch execution nonexistent not found"); }); 
it("should support status filtering", async () => { - // Insert test batch const batchId = "batch-456"; - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO batch_executions ( - id, type, action, parameters, target_nodes, target_groups, - status, created_at, started_at, user_id, execution_ids, - stats_total, stats_queued, stats_running, stats_success, stats_failed - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - batchId, - "command", - "uptime", - null, - JSON.stringify(["node1", "node2"]), - JSON.stringify([]), - "partial", - "2024-01-01T10:00:00Z", - "2024-01-01T10:00:00Z", - "user1", - JSON.stringify(["exec1", "exec2"]), - 2, - 0, - 0, - 1, - 1, - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); + await insertBatch(db, batchId, { + target_nodes: JSON.stringify(["node1", "node2"]), + status: "partial", + execution_ids: JSON.stringify(["exec1", "exec2"]), + stats_total: 2, stats_queued: 0, stats_running: 0, stats_success: 1, stats_failed: 1, }); - // Insert test executions const executions = [ - { - id: "exec1", - nodeId: "node1", - status: "success", - startedAt: "2024-01-01T10:00:00Z", - completedAt: "2024-01-01T10:00:05Z", - results: JSON.stringify([]), - }, - { - id: "exec2", - nodeId: "node2", - status: "failed", - startedAt: "2024-01-01T10:00:00Z", - completedAt: "2024-01-01T10:00:03Z", - results: JSON.stringify([]), - }, + { id: "exec1", nodeId: "node1", status: "success", startedAt: "2024-01-01T10:00:00Z", completedAt: "2024-01-01T10:00:05Z", results: JSON.stringify([]) }, + { id: "exec2", nodeId: "node2", status: "failed", startedAt: "2024-01-01T10:00:00Z", completedAt: "2024-01-01T10:00:03Z", results: JSON.stringify([]) }, ]; - for (const exec of executions) { - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO executions ( - id, type, target_nodes, action, parameters, status, - started_at, completed_at, results, batch_id, batch_position - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ 
- exec.id, - "command", - JSON.stringify([exec.nodeId]), - "uptime", - null, - exec.status, - exec.startedAt, - exec.completedAt, - exec.results, - batchId, - executions.indexOf(exec), - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + for (let i = 0; i < executions.length; i++) { + await insertExecution(db, executions[i], batchId, i); } - // Test with status filter const result = await service.getBatchStatus(batchId, "success"); - - // Should only return success executions expect(result.executions).toHaveLength(1); expect(result.executions[0].status).toBe("success"); - - // But stats should still reflect all executions expect(result.batch.stats.total).toBe(2); expect(result.batch.stats.success).toBe(1); expect(result.batch.stats.failed).toBe(1); }); it("should calculate correct progress percentage", async () => { - // Insert test batch with all completed const batchId = "batch-789"; - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO batch_executions ( - id, type, action, parameters, target_nodes, target_groups, - status, created_at, started_at, user_id, execution_ids, - stats_total, stats_queued, stats_running, stats_success, stats_failed - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - batchId, - "command", - "uptime", - null, - JSON.stringify(["node1", "node2"]), - JSON.stringify([]), - "success", - "2024-01-01T10:00:00Z", - "2024-01-01T10:00:00Z", - "user1", - JSON.stringify(["exec1", "exec2"]), - 2, - 0, - 0, - 2, - 0, - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); + await insertBatch(db, batchId, { + target_nodes: JSON.stringify(["node1", "node2"]), + status: "success", + execution_ids: JSON.stringify(["exec1", "exec2"]), + stats_total: 2, stats_queued: 0, stats_running: 0, stats_success: 2, stats_failed: 0, }); - // Insert completed executions for (let i = 1; i <= 2; i++) { - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO executions ( - id, type, 
target_nodes, action, parameters, status, - started_at, completed_at, results, batch_id, batch_position - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - `exec${i}`, - "command", - JSON.stringify([`node${i}`]), - "uptime", - null, - "success", - "2024-01-01T10:00:00Z", - "2024-01-01T10:00:05Z", - JSON.stringify([]), - batchId, - i - 1, - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await insertExecution(db, { + id: `exec${i}`, nodeId: `node${i}`, status: "success", + startedAt: "2024-01-01T10:00:00Z", completedAt: "2024-01-01T10:00:05Z", results: JSON.stringify([]), + }, batchId, i - 1); } const result = await service.getBatchStatus(batchId); - - // All completed = 100% expect(result.progress).toBe(100); expect(result.batch.status).toBe("success"); }); }); describe("BatchExecutionService - expandGroups", () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let service: BatchExecutionService; let mockExecutionQueue: ExecutionQueue; let mockExecutionRepository: ExecutionRepository; let mockIntegrationManager: IntegrationManager; - beforeEach(() => { - db = new sqlite3.Database(":memory:"); + beforeEach(async () => { + db = new SQLiteAdapter(":memory:"); + await db.initialize(); mockExecutionQueue = {} as ExecutionQueue; mockExecutionRepository = {} as ExecutionRepository; mockIntegrationManager = { getAggregatedInventory: vi.fn(), } as unknown as IntegrationManager; - service = new BatchExecutionService( - db, - mockExecutionQueue, - mockExecutionRepository, - mockIntegrationManager - ); + service = new BatchExecutionService(db, mockExecutionQueue, mockExecutionRepository, mockIntegrationManager); + }); + + afterEach(async () => { + await db.close(); }); it("should expand single group to node IDs", async () => { - // Mock inventory with a group vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [ - { id: "node1", name: "server1.example.com" }, - { id: "node2", name: "server2.example.com" }, 
- ], - groups: [ - { - id: "group1", - name: "web-servers", - source: "bolt", - nodes: ["node1", "node2"], - }, - ], + nodes: [{ id: "node1", name: "server1.example.com" }, { id: "node2", name: "server2.example.com" }], + groups: [{ id: "group1", name: "web-servers", source: "bolt", nodes: ["node1", "node2"] }], }); - - // Access private method via type assertion const result = await (service as any).expandGroups(["group1"]); - expect(result).toEqual(["node1", "node2"]); }); it("should expand multiple groups to node IDs", async () => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [ - { id: "node1", name: "server1.example.com" }, - { id: "node2", name: "server2.example.com" }, - { id: "node3", name: "server3.example.com" }, - ], + nodes: [{ id: "node1", name: "server1.example.com" }, { id: "node2", name: "server2.example.com" }, { id: "node3", name: "server3.example.com" }], groups: [ - { - id: "group1", - name: "web-servers", - source: "bolt", - nodes: ["node1", "node2"], - }, - { - id: "group2", - name: "db-servers", - source: "bolt", - nodes: ["node3"], - }, + { id: "group1", name: "web-servers", source: "bolt", nodes: ["node1", "node2"] }, + { id: "group2", name: "db-servers", source: "bolt", nodes: ["node3"] }, ], }); - const result = await (service as any).expandGroups(["group1", "group2"]); - expect(result).toEqual(["node1", "node2", "node3"]); }); it("should handle linked groups from multiple sources", async () => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [ - { id: "node1", name: "server1.example.com" }, - { id: "node2", name: "server2.example.com" }, - { id: "node3", name: "server3.example.com" }, - ], - groups: [ - { - id: "group1", - name: "production", - source: "bolt", - nodes: ["node1", "node2", "node3"], // Linked group includes all nodes - }, - ], + nodes: [{ id: "node1", name: "server1.example.com" }, { id: "node2", name: "server2.example.com" }, { id: "node3", 
name: "server3.example.com" }], + groups: [{ id: "group1", name: "production", source: "bolt", nodes: ["node1", "node2", "node3"] }], }); - const result = await (service as any).expandGroups(["group1"]); - expect(result).toEqual(["node1", "node2", "node3"]); }); it("should skip missing groups and continue with others", async () => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [ - { id: "node1", name: "server1.example.com" }, - { id: "node2", name: "server2.example.com" }, - ], - groups: [ - { - id: "group1", - name: "web-servers", - source: "bolt", - nodes: ["node1", "node2"], - }, - ], + nodes: [{ id: "node1", name: "server1.example.com" }, { id: "node2", name: "server2.example.com" }], + groups: [{ id: "group1", name: "web-servers", source: "bolt", nodes: ["node1", "node2"] }], }); - - // Request includes a missing group const result = await (service as any).expandGroups(["group1", "missing-group"]); - - // Should return nodes from group1 and skip missing-group expect(result).toEqual(["node1", "node2"]); }); it("should return empty array when all groups are missing", async () => { - vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [], - groups: [], - }); - + vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ nodes: [], groups: [] }); const result = await (service as any).expandGroups(["missing1", "missing2"]); - expect(result).toEqual([]); }); it("should return empty array when no groups provided", async () => { const result = await (service as any).expandGroups([]); - expect(result).toEqual([]); }); @@ -530,81 +297,48 @@ describe("BatchExecutionService - expandGroups", () => { let callCount = 0; vi.mocked(mockIntegrationManager.getAggregatedInventory).mockImplementation(async () => { callCount++; - if (callCount === 1) { - throw new Error("Network error"); - } + if (callCount === 1) throw new Error("Network error"); return { nodes: [{ id: "node1", name: 
"server1.example.com" }], - groups: [ - { - id: "group2", - name: "db-servers", - source: "bolt", - nodes: ["node1"], - }, - ], + groups: [{ id: "group2", name: "db-servers", source: "bolt", nodes: ["node1"] }], }; }); - const result = await (service as any).expandGroups(["group1", "group2"]); - - // Should continue after error and process group2 expect(result).toEqual(["node1"]); }); }); describe("BatchExecutionService - deduplicateNodes", () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let service: BatchExecutionService; - let mockExecutionQueue: ExecutionQueue; - let mockExecutionRepository: ExecutionRepository; - let mockIntegrationManager: IntegrationManager; - beforeEach(() => { - db = new sqlite3.Database(":memory:"); - mockExecutionQueue = {} as ExecutionQueue; - mockExecutionRepository = {} as ExecutionRepository; - mockIntegrationManager = {} as IntegrationManager; - - service = new BatchExecutionService( - db, - mockExecutionQueue, - mockExecutionRepository, - mockIntegrationManager - ); + beforeEach(async () => { + db = new SQLiteAdapter(":memory:"); + await db.initialize(); + service = new BatchExecutionService(db, {} as ExecutionQueue, {} as ExecutionRepository, {} as IntegrationManager); }); - it("should remove duplicate node IDs", () => { - const result = (service as any).deduplicateNodes([ - "node1", - "node2", - "node1", - "node3", - "node2", - ]); + afterEach(async () => { + await db.close(); + }); + it("should remove duplicate node IDs", () => { + const result = (service as any).deduplicateNodes(["node1", "node2", "node1", "node3", "node2"]); expect(result).toEqual(["node1", "node2", "node3"]); }); it("should handle array with no duplicates", () => { const result = (service as any).deduplicateNodes(["node1", "node2", "node3"]); - expect(result).toEqual(["node1", "node2", "node3"]); }); it("should handle empty array", () => { const result = (service as any).deduplicateNodes([]); - expect(result).toEqual([]); }); it("should handle 
array with all duplicates", () => { - const result = (service as any).deduplicateNodes([ - "node1", - "node1", - "node1", - ]); - + const result = (service as any).deduplicateNodes(["node1", "node1", "node1"]); expect(result).toEqual(["node1"]); }); @@ -612,146 +346,69 @@ describe("BatchExecutionService - deduplicateNodes", () => { const input = ["node1", "node2", "node1", "node3", "node2"]; const firstPass = (service as any).deduplicateNodes(input); const secondPass = (service as any).deduplicateNodes(firstPass); - expect(firstPass).toEqual(secondPass); expect(firstPass).toEqual(["node1", "node2", "node3"]); }); }); describe("BatchExecutionService - validateNodes", () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let service: BatchExecutionService; - let mockExecutionQueue: ExecutionQueue; - let mockExecutionRepository: ExecutionRepository; let mockIntegrationManager: IntegrationManager; - beforeEach(() => { - db = new sqlite3.Database(":memory:"); - mockExecutionQueue = {} as ExecutionQueue; - mockExecutionRepository = {} as ExecutionRepository; - mockIntegrationManager = { - getAggregatedInventory: vi.fn(), - } as unknown as IntegrationManager; + beforeEach(async () => { + db = new SQLiteAdapter(":memory:"); + await db.initialize(); + mockIntegrationManager = { getAggregatedInventory: vi.fn() } as unknown as IntegrationManager; + service = new BatchExecutionService(db, {} as ExecutionQueue, {} as ExecutionRepository, mockIntegrationManager); + }); - service = new BatchExecutionService( - db, - mockExecutionQueue, - mockExecutionRepository, - mockIntegrationManager - ); + afterEach(async () => { + await db.close(); }); it("should validate all nodes exist", async () => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [ - { id: "node1", name: "server1.example.com" }, - { id: "node2", name: "server2.example.com" }, - { id: "node3", name: "server3.example.com" }, - ], + nodes: [{ id: "node1", name: 
"server1.example.com" }, { id: "node2", name: "server2.example.com" }, { id: "node3", name: "server3.example.com" }], groups: [], }); - - await expect( - (service as any).validateNodes(["node1", "node2", "node3"]) - ).resolves.toBeUndefined(); + await expect((service as any).validateNodes(["node1", "node2", "node3"])).resolves.toBeUndefined(); }); it("should throw error for invalid node IDs", async () => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [ - { id: "node1", name: "server1.example.com" }, - { id: "node2", name: "server2.example.com" }, - ], + nodes: [{ id: "node1", name: "server1.example.com" }, { id: "node2", name: "server2.example.com" }], groups: [], }); - - await expect( - (service as any).validateNodes(["node1", "invalid1", "invalid2"]) - ).rejects.toThrow("Invalid node IDs: invalid1, invalid2"); + await expect((service as any).validateNodes(["node1", "invalid1", "invalid2"])).rejects.toThrow("Invalid node IDs: invalid1, invalid2"); }); it("should throw error when all nodes are invalid", async () => { vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [ - { id: "node1", name: "server1.example.com" }, - ], + nodes: [{ id: "node1", name: "server1.example.com" }], groups: [], }); - - await expect( - (service as any).validateNodes(["invalid1", "invalid2"]) - ).rejects.toThrow("Invalid node IDs: invalid1, invalid2"); + await expect((service as any).validateNodes(["invalid1", "invalid2"])).rejects.toThrow("Invalid node IDs: invalid1, invalid2"); }); it("should handle empty node array", async () => { - vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ - nodes: [], - groups: [], - }); - - await expect( - (service as any).validateNodes([]) - ).resolves.toBeUndefined(); + vi.mocked(mockIntegrationManager.getAggregatedInventory).mockResolvedValue({ nodes: [], groups: [] }); + await expect((service as any).validateNodes([])).resolves.toBeUndefined(); }); }); 
describe("BatchExecutionService - createBatch", () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let service: BatchExecutionService; let mockExecutionQueue: ExecutionQueue; let mockExecutionRepository: ExecutionRepository; let mockIntegrationManager: IntegrationManager; beforeEach(async () => { - // Create in-memory database - db = new sqlite3.Database(":memory:"); - - // Create tables - await new Promise((resolve, reject) => { - db.exec( - ` - CREATE TABLE batch_executions ( - id TEXT PRIMARY KEY, - type TEXT NOT NULL, - action TEXT NOT NULL, - parameters TEXT, - target_nodes TEXT NOT NULL, - target_groups TEXT NOT NULL, - status TEXT NOT NULL, - created_at TEXT NOT NULL, - started_at TEXT, - completed_at TEXT, - user_id TEXT NOT NULL, - execution_ids TEXT NOT NULL, - stats_total INTEGER NOT NULL, - stats_queued INTEGER NOT NULL, - stats_running INTEGER NOT NULL, - stats_success INTEGER NOT NULL, - stats_failed INTEGER NOT NULL - ); - - CREATE TABLE executions ( - id TEXT PRIMARY KEY, - type TEXT NOT NULL, - target_nodes TEXT NOT NULL, - action TEXT NOT NULL, - parameters TEXT, - status TEXT NOT NULL, - started_at TEXT NOT NULL, - completed_at TEXT, - results TEXT NOT NULL, - batch_id TEXT, - batch_position INTEGER - ); - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + db = new SQLiteAdapter(":memory:"); + await db.initialize(); + await createSchema(db); - // Mock dependencies mockExecutionQueue = { acquire: vi.fn().mockResolvedValue(undefined), release: vi.fn(), @@ -772,66 +429,34 @@ describe("BatchExecutionService - createBatch", () => { { id: "node2", name: "server2.example.com" }, { id: "node3", name: "server3.example.com" }, ], - groups: [ - { - id: "group1", - name: "web-servers", - source: "bolt", - nodes: ["node1", "node2"], - }, - ], + groups: [{ id: "group1", name: "web-servers", source: "bolt", nodes: ["node1", "node2"] }], }), executeAction: vi.fn().mockResolvedValue({ status: "success", completedAt: new 
Date().toISOString(), - results: [ - { - nodeId: "node1", - status: "success", - duration: 100, - }, - ], + results: [{ nodeId: "node1", status: "success", duration: 100 }], }), } as unknown as IntegrationManager; - service = new BatchExecutionService( - db, - mockExecutionQueue, - mockExecutionRepository, - mockIntegrationManager - ); + service = new BatchExecutionService(db, mockExecutionQueue, mockExecutionRepository, mockIntegrationManager); }); - it("should create batch with direct node IDs", async () => { - const request = { - targetNodeIds: ["node1", "node2"], - type: "command" as const, - action: "uptime", - }; - - const result = await service.createBatch(request, "user1"); + afterEach(async () => { + await db.close(); + }); + it("should create batch with direct node IDs", async () => { + const result = await service.createBatch({ targetNodeIds: ["node1", "node2"], type: "command", action: "uptime" }, "user1"); expect(result.batchId).toBeDefined(); expect(result.executionIds).toHaveLength(2); expect(result.targetCount).toBe(2); expect(result.expandedNodeIds).toEqual(["node1", "node2"]); - - // Verify execution queue was called for each node expect(mockExecutionQueue.acquire).toHaveBeenCalledTimes(2); - - // Verify execution repository was called for each node expect(mockExecutionRepository.create).toHaveBeenCalledTimes(2); }); it("should create batch with group IDs and expand them", async () => { - const request = { - targetGroupIds: ["group1"], - type: "command" as const, - action: "uptime", - }; - - const result = await service.createBatch(request, "user1"); - + const result = await service.createBatch({ targetGroupIds: ["group1"], type: "command", action: "uptime" }, "user1"); expect(result.batchId).toBeDefined(); expect(result.executionIds).toHaveLength(2); expect(result.targetCount).toBe(2); @@ -839,15 +464,7 @@ describe("BatchExecutionService - createBatch", () => { }); it("should create batch with mixed node and group IDs", async () => { - const 
request = { - targetNodeIds: ["node3"], - targetGroupIds: ["group1"], - type: "command" as const, - action: "uptime", - }; - - const result = await service.createBatch(request, "user1"); - + const result = await service.createBatch({ targetNodeIds: ["node3"], targetGroupIds: ["group1"], type: "command", action: "uptime" }, "user1"); expect(result.batchId).toBeDefined(); expect(result.executionIds).toHaveLength(3); expect(result.targetCount).toBe(3); @@ -855,55 +472,29 @@ describe("BatchExecutionService - createBatch", () => { }); it("should deduplicate nodes from overlapping groups", async () => { - const request = { - targetNodeIds: ["node1"], - targetGroupIds: ["group1"], // group1 contains node1 and node2 - type: "command" as const, - action: "uptime", - }; - - const result = await service.createBatch(request, "user1"); - - // node1 appears in both targetNodeIds and group1, should be deduplicated + const result = await service.createBatch({ targetNodeIds: ["node1"], targetGroupIds: ["group1"], type: "command", action: "uptime" }, "user1"); expect(result.targetCount).toBe(2); expect(result.expandedNodeIds).toEqual(["node1", "node2"]); }); it("should create batch with parameters", async () => { - const request = { - targetNodeIds: ["node1"], - type: "task" as const, - action: "package::install", + const result = await service.createBatch({ + targetNodeIds: ["node1"], type: "task", action: "package::install", parameters: { package: "nginx", version: "latest" }, - }; - - const result = await service.createBatch(request, "user1"); - + }, "user1"); expect(result.batchId).toBeDefined(); expect(result.executionIds).toHaveLength(1); - - // Verify parameters were passed to execution repository - expect(mockExecutionRepository.create).toHaveBeenCalledWith( - expect.objectContaining({ - parameters: { package: "nginx", version: "latest" }, - }) - ); + expect(mockExecutionRepository.create).toHaveBeenCalledWith(expect.objectContaining({ + parameters: { package: "nginx", 
version: "latest" }, + })); }); it("should throw error for invalid node IDs", async () => { - const request = { - targetNodeIds: ["invalid-node"], - type: "command" as const, - action: "uptime", - }; - - await expect(service.createBatch(request, "user1")).rejects.toThrow( - "Invalid node IDs: invalid-node" - ); + await expect(service.createBatch({ targetNodeIds: ["invalid-node"], type: "command", action: "uptime" }, "user1")) + .rejects.toThrow("Invalid node IDs: invalid-node"); }); it("should handle execution queue full error", async () => { - // Mock queue to throw error on second acquire let acquireCount = 0; vi.mocked(mockExecutionQueue.acquire).mockImplementation(async () => { acquireCount++; @@ -913,39 +504,13 @@ describe("BatchExecutionService - createBatch", () => { throw error; } }); - - const request = { - targetNodeIds: ["node1", "node2"], - type: "command" as const, - action: "uptime", - }; - - await expect(service.createBatch(request, "user1")).rejects.toThrow( - "Failed to enqueue execution for node node2" - ); + await expect(service.createBatch({ targetNodeIds: ["node1", "node2"], type: "command", action: "uptime" }, "user1")) + .rejects.toThrow("Failed to enqueue execution for node node2"); }); it("should create batch record in database with correct stats", async () => { - const request = { - targetNodeIds: ["node1", "node2", "node3"], - type: "command" as const, - action: "uptime", - }; - - const result = await service.createBatch(request, "user1"); - - // Verify batch record was created - const batchRow = await new Promise((resolve, reject) => { - db.get( - "SELECT * FROM batch_executions WHERE id = ?", - [result.batchId], - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); - + const result = await service.createBatch({ targetNodeIds: ["node1", "node2", "node3"], type: "command", action: "uptime" }, "user1"); + const batchRow = await db.queryOne("SELECT * FROM batch_executions WHERE id = ?", [result.batchId]); 
expect(batchRow).toBeDefined(); expect(batchRow.type).toBe("command"); expect(batchRow.action).toBe("uptime"); @@ -953,202 +518,57 @@ describe("BatchExecutionService - createBatch", () => { expect(batchRow.user_id).toBe("user1"); expect(batchRow.stats_total).toBe(3); expect(batchRow.stats_queued).toBe(3); - expect(batchRow.stats_running).toBe(0); - expect(batchRow.stats_success).toBe(0); - expect(batchRow.stats_failed).toBe(0); expect(JSON.parse(batchRow.target_nodes)).toEqual(["node1", "node2", "node3"]); expect(JSON.parse(batchRow.execution_ids)).toHaveLength(3); }); it("should set batch_id and batch_position on execution records", async () => { - const request = { - targetNodeIds: ["node1", "node2"], - type: "command" as const, - action: "uptime", - }; - - await service.createBatch(request, "user1"); - - // Verify execution records have batch tracking + await service.createBatch({ targetNodeIds: ["node1", "node2"], type: "command", action: "uptime" }, "user1"); const calls = vi.mocked(mockExecutionRepository.create).mock.calls; - expect(calls[0][0]).toMatchObject({ - batchId: expect.any(String), - batchPosition: 0, - }); - expect(calls[1][0]).toMatchObject({ - batchId: expect.any(String), - batchPosition: 1, - }); + expect(calls[0][0]).toMatchObject({ batchId: expect.any(String), batchPosition: 0 }); + expect(calls[1][0]).toMatchObject({ batchId: expect.any(String), batchPosition: 1 }); }); }); describe("BatchExecutionService - cancelBatch", () => { - let db: sqlite3.Database; + let db: DatabaseAdapter; let service: BatchExecutionService; - let mockExecutionQueue: ExecutionQueue; - let mockExecutionRepository: ExecutionRepository; let mockIntegrationManager: IntegrationManager; beforeEach(async () => { - // Create in-memory database - db = new sqlite3.Database(":memory:"); - - // Create tables - await new Promise((resolve, reject) => { - db.exec( - ` - CREATE TABLE batch_executions ( - id TEXT PRIMARY KEY, - type TEXT NOT NULL, - action TEXT NOT NULL, - 
parameters TEXT, - target_nodes TEXT NOT NULL, - target_groups TEXT NOT NULL, - status TEXT NOT NULL, - created_at TEXT NOT NULL, - started_at TEXT, - completed_at TEXT, - user_id TEXT NOT NULL, - execution_ids TEXT NOT NULL, - stats_total INTEGER NOT NULL, - stats_queued INTEGER NOT NULL, - stats_running INTEGER NOT NULL, - stats_success INTEGER NOT NULL, - stats_failed INTEGER NOT NULL - ); - - CREATE TABLE executions ( - id TEXT PRIMARY KEY, - type TEXT NOT NULL, - target_nodes TEXT NOT NULL, - action TEXT NOT NULL, - parameters TEXT, - status TEXT NOT NULL, - started_at TEXT NOT NULL, - completed_at TEXT, - results TEXT NOT NULL, - batch_id TEXT, - batch_position INTEGER, - error TEXT - ); - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + db = new SQLiteAdapter(":memory:"); + await db.initialize(); + await createSchema(db); - // Mock dependencies - mockExecutionQueue = {} as ExecutionQueue; - mockExecutionRepository = {} as ExecutionRepository; mockIntegrationManager = { getAggregatedInventory: vi.fn().mockResolvedValue({ - nodes: [ - { id: "node1", name: "server1.example.com" }, - { id: "node2", name: "server2.example.com" }, - { id: "node3", name: "server3.example.com" }, - ], + nodes: [{ id: "node1", name: "server1.example.com" }, { id: "node2", name: "server2.example.com" }, { id: "node3", name: "server3.example.com" }], groups: [], }), } as unknown as IntegrationManager; - service = new BatchExecutionService( - db, - mockExecutionQueue, - mockExecutionRepository, - mockIntegrationManager - ); + service = new BatchExecutionService(db, {} as ExecutionQueue, {} as ExecutionRepository, mockIntegrationManager); + }); + + afterEach(async () => { + await db.close(); }); it("should cancel running executions and update batch status", async () => { - // Insert test batch const batchId = "batch-cancel-1"; - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO batch_executions ( - id, type, action, parameters, target_nodes, 
target_groups, - status, created_at, started_at, user_id, execution_ids, - stats_total, stats_queued, stats_running, stats_success, stats_failed - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - batchId, - "command", - "uptime", - null, - JSON.stringify(["node1", "node2", "node3"]), - JSON.stringify([]), - "running", - "2024-01-01T10:00:00Z", - "2024-01-01T10:00:00Z", - "user1", - JSON.stringify(["exec1", "exec2", "exec3"]), - 3, - 0, - 3, - 0, - 0, - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await insertBatch(db, batchId, { stats_running: 3, stats_success: 0, stats_failed: 0 }); - // Insert running executions - const executions = [ - { id: "exec1", nodeId: "node1", status: "running" }, - { id: "exec2", nodeId: "node2", status: "running" }, - { id: "exec3", nodeId: "node3", status: "running" }, - ]; - - for (const exec of executions) { - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO executions ( - id, type, target_nodes, action, parameters, status, - started_at, completed_at, results, batch_id, batch_position - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - exec.id, - "command", - JSON.stringify([exec.nodeId]), - "uptime", - null, - exec.status, - "2024-01-01T10:00:00Z", - null, - JSON.stringify([]), - batchId, - executions.indexOf(exec), - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + for (let i = 0; i < 3; i++) { + await insertExecution(db, { + id: `exec${i + 1}`, nodeId: `node${i + 1}`, status: "running", + startedAt: "2024-01-01T10:00:00Z", completedAt: null, results: JSON.stringify([]), + }, batchId, i); } - // Cancel the batch const result = await service.cancelBatch(batchId); - - // Verify cancelled count expect(result.cancelledCount).toBe(3); - // Verify executions are marked as failed with error message - const updatedExecutions = await new Promise((resolve, reject) => { - db.all( - "SELECT * FROM executions WHERE batch_id = ?", - [batchId], - (err, 
rows) => { - if (err) reject(err); - else resolve(rows || []); - } - ); - }); - + const updatedExecutions = await db.query("SELECT * FROM executions WHERE batch_id = ?", [batchId]); expect(updatedExecutions).toHaveLength(3); for (const exec of updatedExecutions) { expect(exec.status).toBe("failed"); @@ -1156,202 +576,51 @@ describe("BatchExecutionService - cancelBatch", () => { expect(exec.completed_at).toBeTruthy(); } - // Verify batch status is updated to cancelled - const updatedBatch = await new Promise((resolve, reject) => { - db.get( - "SELECT * FROM batch_executions WHERE id = ?", - [batchId], - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); - + const updatedBatch = await db.queryOne("SELECT * FROM batch_executions WHERE id = ?", [batchId]); expect(updatedBatch.status).toBe("cancelled"); expect(updatedBatch.completed_at).toBeTruthy(); }); it("should only cancel running executions, not completed ones", async () => { - // Insert test batch const batchId = "batch-cancel-2"; - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO batch_executions ( - id, type, action, parameters, target_nodes, target_groups, - status, created_at, started_at, user_id, execution_ids, - stats_total, stats_queued, stats_running, stats_success, stats_failed - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - batchId, - "command", - "uptime", - null, - JSON.stringify(["node1", "node2", "node3"]), - JSON.stringify([]), - "running", - "2024-01-01T10:00:00Z", - "2024-01-01T10:00:00Z", - "user1", - JSON.stringify(["exec1", "exec2", "exec3"]), - 3, - 0, - 2, - 1, - 0, - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await insertBatch(db, batchId, { stats_running: 2, stats_success: 1, stats_failed: 0 }); - // Insert mixed status executions - const executions = [ - { id: "exec1", nodeId: "node1", status: "success", completedAt: "2024-01-01T10:00:05Z" }, - { id: "exec2", nodeId: "node2", status: "running", 
completedAt: null }, - { id: "exec3", nodeId: "node3", status: "running", completedAt: null }, - ]; + await insertExecution(db, { id: "exec1", nodeId: "node1", status: "success", startedAt: "2024-01-01T10:00:00Z", completedAt: "2024-01-01T10:00:05Z", results: JSON.stringify([]) }, batchId, 0); + await insertExecution(db, { id: "exec2", nodeId: "node2", status: "running", startedAt: "2024-01-01T10:00:00Z", completedAt: null, results: JSON.stringify([]) }, batchId, 1); + await insertExecution(db, { id: "exec3", nodeId: "node3", status: "running", startedAt: "2024-01-01T10:00:00Z", completedAt: null, results: JSON.stringify([]) }, batchId, 2); - for (const exec of executions) { - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO executions ( - id, type, target_nodes, action, parameters, status, - started_at, completed_at, results, batch_id, batch_position - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - exec.id, - "command", - JSON.stringify([exec.nodeId]), - "uptime", - null, - exec.status, - "2024-01-01T10:00:00Z", - exec.completedAt, - JSON.stringify([]), - batchId, - executions.indexOf(exec), - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); - } - - // Cancel the batch const result = await service.cancelBatch(batchId); - - // Should only cancel the 2 running executions expect(result.cancelledCount).toBe(2); - // Verify the success execution is unchanged - const successExecution = await new Promise((resolve, reject) => { - db.get( - "SELECT * FROM executions WHERE id = ?", - ["exec1"], - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); - + const successExecution = await db.queryOne("SELECT * FROM executions WHERE id = ?", ["exec1"]); expect(successExecution.status).toBe("success"); expect(successExecution.error).toBeNull(); }); it("should throw error when batch does not exist", async () => { - await expect(service.cancelBatch("nonexistent")).rejects.toThrow( - "Batch execution nonexistent not 
found" - ); + await expect(service.cancelBatch("nonexistent")).rejects.toThrow("Batch execution nonexistent not found"); }); it("should return zero cancelled count when no running executions", async () => { - // Insert test batch with all completed executions const batchId = "batch-cancel-3"; - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO batch_executions ( - id, type, action, parameters, target_nodes, target_groups, - status, created_at, started_at, user_id, execution_ids, - stats_total, stats_queued, stats_running, stats_success, stats_failed - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - batchId, - "command", - "uptime", - null, - JSON.stringify(["node1", "node2"]), - JSON.stringify([]), - "success", - "2024-01-01T10:00:00Z", - "2024-01-01T10:00:00Z", - "user1", - JSON.stringify(["exec1", "exec2"]), - 2, - 0, - 0, - 2, - 0, - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); + await insertBatch(db, batchId, { + target_nodes: JSON.stringify(["node1", "node2"]), + status: "success", + execution_ids: JSON.stringify(["exec1", "exec2"]), + stats_total: 2, stats_queued: 0, stats_running: 0, stats_success: 2, stats_failed: 0, }); - // Insert completed executions for (let i = 1; i <= 2; i++) { - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO executions ( - id, type, target_nodes, action, parameters, status, - started_at, completed_at, results, batch_id, batch_position - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - `exec${i}`, - "command", - JSON.stringify([`node${i}`]), - "uptime", - null, - "success", - "2024-01-01T10:00:00Z", - "2024-01-01T10:00:05Z", - JSON.stringify([]), - batchId, - i - 1, - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await insertExecution(db, { + id: `exec${i}`, nodeId: `node${i}`, status: "success", + startedAt: "2024-01-01T10:00:00Z", completedAt: "2024-01-01T10:00:05Z", results: JSON.stringify([]), + }, batchId, i - 1); } - // 
Cancel the batch const result = await service.cancelBatch(batchId); - - // Should return 0 since no running executions expect(result.cancelledCount).toBe(0); - // Batch should still be marked as cancelled - const updatedBatch = await new Promise((resolve, reject) => { - db.get( - "SELECT * FROM batch_executions WHERE id = ?", - [batchId], - (err, row) => { - if (err) reject(err); - else resolve(row); - } - ); - }); - + const updatedBatch = await db.queryOne("SELECT * FROM batch_executions WHERE id = ?", [batchId]); expect(updatedBatch.status).toBe("cancelled"); }); }); diff --git a/backend/test/services/IntegrationColorService.test.ts b/backend/test/services/IntegrationColorService.test.ts index ec03aada..fdd45f5c 100644 --- a/backend/test/services/IntegrationColorService.test.ts +++ b/backend/test/services/IntegrationColorService.test.ts @@ -12,36 +12,36 @@ describe('IntegrationColorService', () => { it('should return correct color for bolt integration', () => { const color = service.getColor('bolt'); expect(color).toEqual({ - primary: '#FFAE1A', - light: '#FFF4E0', - dark: '#CC8B15', + primary: '#22C55E', + light: '#F0FDF4', + dark: '#16A34A', }); }); it('should return correct color for puppetdb integration', () => { const color = service.getColor('puppetdb'); expect(color).toEqual({ - primary: '#9063CD', - light: '#F0E6FF', - dark: '#7249A8', + primary: '#F97316', + light: '#FFF7ED', + dark: '#EA580C', }); }); it('should return correct color for puppetserver integration', () => { const color = service.getColor('puppetserver'); expect(color).toEqual({ - primary: '#2E3A87', - light: '#E8EAFF', - dark: '#1F2760', + primary: '#EF4444', + light: '#FEF2F2', + dark: '#DC2626', }); }); it('should return correct color for hiera integration', () => { const color = service.getColor('hiera'); expect(color).toEqual({ - primary: '#C1272D', - light: '#FFE8E9', - dark: '#9A1F24', + primary: '#F59E0B', + light: '#FFFBEB', + dark: '#D97706', }); }); @@ -93,7 +93,7 @@ 
describe('IntegrationColorService', () => { describe('getValidIntegrations', () => { it('should return array of valid integration names', () => { const integrations = service.getValidIntegrations(); - expect(integrations).toEqual(['bolt', 'ansible', 'puppetdb', 'puppetserver', 'hiera', 'ssh']); + expect(integrations).toEqual(['proxmox', 'aws', 'bolt', 'ansible', 'ssh', 'puppetdb', 'puppetserver', 'hiera']); }); }); diff --git a/backend/test/services/IntegrationConfigService.test.ts b/backend/test/services/IntegrationConfigService.test.ts new file mode 100644 index 00000000..4dfe088c --- /dev/null +++ b/backend/test/services/IntegrationConfigService.test.ts @@ -0,0 +1,508 @@ +import { describe, it, expect, beforeEach, vi } from "vitest"; +import { IntegrationConfigService } from "../../src/services/IntegrationConfigService"; +import type { DatabaseAdapter } from "../../src/database/DatabaseAdapter"; + +const TEST_SECRET = "test-jwt-secret-for-encryption-testing"; + +function createMockDb(): DatabaseAdapter { + return { + query: vi.fn().mockResolvedValue([]), + queryOne: vi.fn().mockResolvedValue(null), + execute: vi.fn().mockResolvedValue({ changes: 1 }), + beginTransaction: vi.fn().mockResolvedValue(undefined), + commit: vi.fn().mockResolvedValue(undefined), + rollback: vi.fn().mockResolvedValue(undefined), + withTransaction: vi.fn(), + initialize: vi.fn().mockResolvedValue(undefined), + close: vi.fn().mockResolvedValue(undefined), + isConnected: vi.fn().mockReturnValue(true), + getDialect: vi.fn().mockReturnValue("sqlite" as const), + getPlaceholder: vi.fn((_i: number) => "?"), + }; +} + +describe("IntegrationConfigService", () => { + let db: DatabaseAdapter; + let service: IntegrationConfigService; + + beforeEach(() => { + db = createMockDb(); + service = new IntegrationConfigService(db, TEST_SECRET); + }); + + // -------------------------------------------------------------------------- + // saveConfig + // 
-------------------------------------------------------------------------- + describe("saveConfig", () => { + it("inserts a config with upsert SQL", async () => { + await service.saveConfig("user-1", "proxmox", { host: "10.0.0.1", port: 8006 }); + + expect(db.execute).toHaveBeenCalledOnce(); + const [sql, params] = (db.execute as ReturnType).mock.calls[0]; + expect(sql).toContain("INSERT INTO integration_configs"); + expect(sql).toContain("ON CONFLICT"); + expect(params[1]).toBe("user-1"); + expect(params[2]).toBe("proxmox"); + }); + + it("encrypts sensitive fields before storing", async () => { + await service.saveConfig("user-1", "aws", { + region: "us-east-1", + accessKeyId: "AKIA...", + secretAccessKey: "wJalr...", + apiToken: "tok-123", + }); + + const [, params] = (db.execute as ReturnType).mock.calls[0]; + const storedConfig = JSON.parse(params[3] as string); + + // Non-sensitive fields remain plaintext + expect(storedConfig.region).toBe("us-east-1"); + + // Sensitive fields are encrypted envelopes (JSON strings with iv/salt/encrypted/tag) + for (const field of ["accessKeyId", "secretAccessKey", "apiToken"]) { + const envelope = JSON.parse(storedConfig[field]); + expect(envelope).toHaveProperty("iv"); + expect(envelope).toHaveProperty("salt"); + expect(envelope).toHaveProperty("encrypted"); + expect(envelope).toHaveProperty("tag"); + } + }); + + it("rejects invalid input (empty userId)", async () => { + await expect( + service.saveConfig("", "proxmox", { host: "10.0.0.1" }), + ).rejects.toThrow(); + }); + + it("rejects invalid input (empty integrationName)", async () => { + await expect( + service.saveConfig("user-1", "", { host: "10.0.0.1" }), + ).rejects.toThrow(); + }); + }); + + // -------------------------------------------------------------------------- + // getConfig + // -------------------------------------------------------------------------- + describe("getConfig", () => { + it("returns null when no record exists", async () => { + const result = 
await service.getConfig("user-1", "proxmox"); + expect(result).toBeNull(); + }); + + it("decrypts sensitive fields on retrieval", async () => { + // First save to capture the encrypted config + await service.saveConfig("user-1", "aws", { + region: "us-east-1", + secretAccessKey: "my-secret-value", + }); + + const [, saveParams] = (db.execute as ReturnType).mock.calls[0]; + const encryptedConfigJson = saveParams[3] as string; + + // Mock queryOne to return the saved row + (db.queryOne as ReturnType).mockResolvedValueOnce({ + id: "test-id", + userId: "user-1", + integrationName: "aws", + config: encryptedConfigJson, + isActive: 1, + createdAt: "2024-01-01T00:00:00.000Z", + updatedAt: "2024-01-01T00:00:00.000Z", + }); + + const result = await service.getConfig("user-1", "aws"); + + expect(result).not.toBeNull(); + expect(result!.config.region).toBe("us-east-1"); + expect(result!.config.secretAccessKey).toBe("my-secret-value"); + expect(result!.isActive).toBe(true); + }); + }); + + // -------------------------------------------------------------------------- + // deleteConfig + // -------------------------------------------------------------------------- + describe("deleteConfig", () => { + it("executes DELETE with correct params", async () => { + await service.deleteConfig("user-1", "proxmox"); + + expect(db.execute).toHaveBeenCalledOnce(); + const [sql, params] = (db.execute as ReturnType).mock.calls[0]; + expect(sql).toContain("DELETE FROM integration_configs"); + expect(params).toEqual(["user-1", "proxmox"]); + }); + }); + + // -------------------------------------------------------------------------- + // listConfigs + // -------------------------------------------------------------------------- + describe("listConfigs", () => { + it("returns empty array when user has no configs", async () => { + const result = await service.listConfigs("user-1"); + expect(result).toEqual([]); + }); + + it("returns decrypted configs for a user", async () => { + // Save a config to 
capture encrypted JSON + await service.saveConfig("user-1", "proxmox", { + host: "10.0.0.1", + password: "admin123", + }); + + const [, saveParams] = (db.execute as ReturnType).mock.calls[0]; + const encryptedConfigJson = saveParams[3] as string; + + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "cfg-1", + userId: "user-1", + integrationName: "proxmox", + config: encryptedConfigJson, + isActive: 1, + createdAt: "2024-01-01T00:00:00.000Z", + updatedAt: "2024-01-01T00:00:00.000Z", + }, + ]); + + const result = await service.listConfigs("user-1"); + + expect(result).toHaveLength(1); + expect(result[0].config.host).toBe("10.0.0.1"); + expect(result[0].config.password).toBe("admin123"); + }); + }); + + // -------------------------------------------------------------------------- + // getActiveConfigs + // -------------------------------------------------------------------------- + describe("getActiveConfigs", () => { + it("queries for active configs only", async () => { + await service.getActiveConfigs(); + + expect(db.query).toHaveBeenCalledOnce(); + const [sql] = (db.query as ReturnType).mock.calls[0]; + expect(sql).toContain("isActive = 1"); + }); + }); + + // -------------------------------------------------------------------------- + // Encryption round-trip + // -------------------------------------------------------------------------- + describe("encryption round-trip", () => { + it("preserves all sensitive field values through save/retrieve cycle", async () => { + const originalConfig = { + host: "10.0.0.1", + port: 8006, + apiToken: "PVEAPIToken=user@pam!mytoken=aaaa-bbbb-cccc", + password: "super-secret-password", + sshKey: "ssh-rsa AAAAB3...", + clientSecret: "cs_live_abc123", + enabled: true, + }; + + await service.saveConfig("user-1", "proxmox", originalConfig); + + const [, saveParams] = (db.execute as ReturnType).mock.calls[0]; + const encryptedConfigJson = saveParams[3] as string; + + (db.queryOne as ReturnType).mockResolvedValueOnce({ + id: 
"test-id", + userId: "user-1", + integrationName: "proxmox", + config: encryptedConfigJson, + isActive: 1, + createdAt: "2024-01-01T00:00:00.000Z", + updatedAt: "2024-01-01T00:00:00.000Z", + }); + + const result = await service.getConfig("user-1", "proxmox"); + + expect(result!.config.host).toBe("10.0.0.1"); + expect(result!.config.port).toBe(8006); + expect(result!.config.apiToken).toBe("PVEAPIToken=user@pam!mytoken=aaaa-bbbb-cccc"); + expect(result!.config.password).toBe("super-secret-password"); + expect(result!.config.sshKey).toBe("ssh-rsa AAAAB3..."); + expect(result!.config.clientSecret).toBe("cs_live_abc123"); + expect(result!.config.enabled).toBe(true); + }); + + it("encrypted values at rest differ from plaintext", async () => { + await service.saveConfig("user-1", "aws", { + secretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + }); + + const [, params] = (db.execute as ReturnType).mock.calls[0]; + const storedConfig = JSON.parse(params[3] as string); + + // The stored value should NOT be the plaintext + expect(storedConfig.secretAccessKey).not.toBe( + "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + ); + }); + + it("handles null and undefined sensitive field values gracefully", async () => { + await service.saveConfig("user-1", "proxmox", { + host: "10.0.0.1", + password: null, + apiToken: undefined, + }); + + const [, params] = (db.execute as ReturnType).mock.calls[0]; + const storedConfig = JSON.parse(params[3] as string); + + // null/undefined sensitive fields should not be encrypted + expect(storedConfig.password).toBeNull(); + expect(storedConfig.apiToken).toBeUndefined(); + }); + }); + + // -------------------------------------------------------------------------- + // Sensitive field detection + // -------------------------------------------------------------------------- + describe("sensitive field detection", () => { + it("detects fields matching *token*, *password*, *secret*, *key* patterns", async () => { + const config = { + host: 
"plain", + apiToken: "sensitive", + API_TOKEN: "sensitive", + password: "sensitive", + dbPassword: "sensitive", + secretKey: "sensitive", + accessKey: "sensitive", + clientSecret: "sensitive", + region: "plain", + }; + + await service.saveConfig("user-1", "test", config); + + const [, params] = (db.execute as ReturnType).mock.calls[0]; + const stored = JSON.parse(params[3] as string); + + // Plaintext fields + expect(stored.host).toBe("plain"); + expect(stored.region).toBe("plain"); + + // Encrypted fields (should be JSON envelope strings) + for (const field of [ + "apiToken", "API_TOKEN", "password", "dbPassword", + "secretKey", "accessKey", "clientSecret", + ]) { + const envelope = JSON.parse(stored[field]); + expect(envelope).toHaveProperty("iv"); + expect(envelope).toHaveProperty("salt"); + expect(envelope).toHaveProperty("encrypted"); + expect(envelope).toHaveProperty("tag"); + } + }); + }); + + // -------------------------------------------------------------------------- + // getEffectiveConfig + // -------------------------------------------------------------------------- + describe("getEffectiveConfig", () => { + it("returns empty object when neither env nor DB config exists", async () => { + const result = await service.getEffectiveConfig("proxmox"); + expect(result).toEqual({}); + }); + + it("returns env config only when no DB config exists", async () => { + const envProvider = vi.fn().mockReturnValue({ host: "10.0.0.1", port: 8006 }); + const svcWithEnv = new IntegrationConfigService(db, TEST_SECRET, envProvider); + + const result = await svcWithEnv.getEffectiveConfig("proxmox"); + + expect(envProvider).toHaveBeenCalledWith("proxmox"); + expect(result).toEqual({ host: "10.0.0.1", port: 8006 }); + }); + + it("returns DB config only when no env provider is set", async () => { + // Save a config to capture encrypted JSON + await service.saveConfig("user-1", "proxmox", { host: "db-host", port: 9999 }); + const [, saveParams] = (db.execute as 
ReturnType).mock.calls[0]; + const encryptedConfigJson = saveParams[3] as string; + + (db.queryOne as ReturnType).mockResolvedValueOnce({ + id: "cfg-1", + userId: "user-1", + integrationName: "proxmox", + config: encryptedConfigJson, + isActive: 1, + createdAt: "2024-01-01T00:00:00.000Z", + updatedAt: "2024-01-01T00:00:00.000Z", + }); + + const result = await service.getEffectiveConfig("proxmox"); + + expect(result).toEqual({ host: "db-host", port: 9999 }); + }); + + it("merges env and DB configs with DB overriding non-null keys", async () => { + const envProvider = vi.fn().mockReturnValue({ + host: "env-host", + port: 8006, + region: "us-east-1", + }); + const svcWithEnv = new IntegrationConfigService(db, TEST_SECRET, envProvider); + + // Save a config to capture encrypted JSON (DB overrides host, adds extra) + await svcWithEnv.saveConfig("user-1", "proxmox", { host: "db-host", extra: "value" }); + const [, saveParams] = (db.execute as ReturnType).mock.calls[0]; + const encryptedConfigJson = saveParams[3] as string; + + (db.queryOne as ReturnType).mockResolvedValueOnce({ + id: "cfg-1", + userId: "user-1", + integrationName: "proxmox", + config: encryptedConfigJson, + isActive: 1, + createdAt: "2024-01-01T00:00:00.000Z", + updatedAt: "2024-01-01T00:00:00.000Z", + }); + + const result = await svcWithEnv.getEffectiveConfig("proxmox"); + + // DB overrides env for "host", env "port" and "region" preserved, DB "extra" added + expect(result.host).toBe("db-host"); + expect(result.port).toBe(8006); + expect(result.region).toBe("us-east-1"); + expect(result.extra).toBe("value"); + }); + + it("does not override env values with null DB values", async () => { + const envProvider = vi.fn().mockReturnValue({ host: "env-host", port: 8006 }); + const svcWithEnv = new IntegrationConfigService(db, TEST_SECRET, envProvider); + + // Simulate a DB config where host is null (stored as plaintext null in JSON) + const dbConfigJson = JSON.stringify({ host: null, extra: "db-extra" }); + + 
(db.queryOne as ReturnType).mockResolvedValueOnce({ + id: "cfg-1", + userId: "user-1", + integrationName: "proxmox", + config: dbConfigJson, + isActive: 1, + createdAt: "2024-01-01T00:00:00.000Z", + updatedAt: "2024-01-01T00:00:00.000Z", + }); + + const result = await svcWithEnv.getEffectiveConfig("proxmox"); + + // null DB value should NOT override env value + expect(result.host).toBe("env-host"); + expect(result.port).toBe(8006); + expect(result.extra).toBe("db-extra"); + }); + + it("returns env config when env provider returns empty object and no DB config", async () => { + const envProvider = vi.fn().mockReturnValue({}); + const svcWithEnv = new IntegrationConfigService(db, TEST_SECRET, envProvider); + + // envProvider returns {} which is truthy, so it's treated as envConfig + const result = await svcWithEnv.getEffectiveConfig("unknown"); + expect(result).toEqual({}); + }); + }); + + // -------------------------------------------------------------------------- + // rotateEncryptionKey + // -------------------------------------------------------------------------- + describe("rotateEncryptionKey", () => { + const OLD_SECRET = "old-secret-key-for-testing"; + const NEW_SECRET = "new-secret-key-for-testing"; + + it("re-encrypts all configs from old key to new key", async () => { + // Create a service with the old secret + const oldService = new IntegrationConfigService(db, OLD_SECRET); + + // Save a config (encrypted with OLD_SECRET) + await oldService.saveConfig("user-1", "proxmox", { + host: "10.0.0.1", + password: "super-secret", + }); + const [, saveParams] = (db.execute as ReturnType).mock.calls[0]; + const encryptedConfigJson = saveParams[3] as string; + + // Mock withTransaction to execute the callback + (db.withTransaction as ReturnType).mockImplementation( + async (fn: () => Promise) => fn(), + ); + + // Mock query to return the saved row + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "cfg-1", + userId: "user-1", + integrationName: "proxmox", 
+ config: encryptedConfigJson, + isActive: 1, + createdAt: "2024-01-01T00:00:00.000Z", + updatedAt: "2024-01-01T00:00:00.000Z", + }, + ]); + + await oldService.rotateEncryptionKey(OLD_SECRET, NEW_SECRET); + + // Verify execute was called with UPDATE (the second call, first was INSERT) + const executeCalls = (db.execute as ReturnType).mock.calls; + const updateCall = executeCalls[executeCalls.length - 1]; + expect(updateCall[0]).toContain("UPDATE integration_configs"); + expect(updateCall[1][2]).toBe("cfg-1"); // id + + // Verify the re-encrypted config can be decrypted with the new key + const reEncryptedJson = updateCall[1][0] as string; + const reEncryptedConfig = JSON.parse(reEncryptedJson); + + // host should remain plaintext + expect(reEncryptedConfig.host).toBe("10.0.0.1"); + + // password should be an encrypted envelope (re-encrypted with new key) + const envelope = JSON.parse(reEncryptedConfig.password); + expect(envelope).toHaveProperty("iv"); + expect(envelope).toHaveProperty("salt"); + expect(envelope).toHaveProperty("encrypted"); + expect(envelope).toHaveProperty("tag"); + + // Now verify the new service (with NEW_SECRET) can decrypt it + (db.queryOne as ReturnType).mockResolvedValueOnce({ + id: "cfg-1", + userId: "user-1", + integrationName: "proxmox", + config: reEncryptedJson, + isActive: 1, + createdAt: "2024-01-01T00:00:00.000Z", + updatedAt: "2024-01-01T00:00:00.000Z", + }); + + // After rotation, the service's secret should be updated to NEW_SECRET + const result = await oldService.getConfig("user-1", "proxmox"); + expect(result!.config.host).toBe("10.0.0.1"); + expect(result!.config.password).toBe("super-secret"); + }); + + it("uses withTransaction for atomicity", async () => { + (db.withTransaction as ReturnType).mockImplementation( + async (fn: () => Promise) => fn(), + ); + (db.query as ReturnType).mockResolvedValueOnce([]); + + await service.rotateEncryptionKey(TEST_SECRET, "new-key"); + + expect(db.withTransaction).toHaveBeenCalledOnce(); + 
}); + + it("handles empty config table gracefully", async () => { + (db.withTransaction as ReturnType).mockImplementation( + async (fn: () => Promise) => fn(), + ); + (db.query as ReturnType).mockResolvedValueOnce([]); + + await service.rotateEncryptionKey(TEST_SECRET, "new-key"); + + // Only the withTransaction query should have been called, no UPDATEs + const executeCalls = (db.execute as ReturnType).mock.calls; + expect(executeCalls).toHaveLength(0); + }); + }); +}); diff --git a/backend/test/services/JournalService.test.ts b/backend/test/services/JournalService.test.ts new file mode 100644 index 00000000..b4d439b9 --- /dev/null +++ b/backend/test/services/JournalService.test.ts @@ -0,0 +1,526 @@ +import { describe, it, expect, beforeEach, vi } from "vitest"; +import { JournalService, type LiveSource } from "../../src/services/journal/JournalService"; +import type { DatabaseAdapter } from "../../src/database/DatabaseAdapter"; +import type { CreateJournalEntry } from "../../src/services/journal/types"; + +function createMockDb(): DatabaseAdapter { + return { + query: vi.fn().mockResolvedValue([]), + queryOne: vi.fn().mockResolvedValue(null), + execute: vi.fn().mockResolvedValue({ changes: 1 }), + beginTransaction: vi.fn().mockResolvedValue(undefined), + commit: vi.fn().mockResolvedValue(undefined), + rollback: vi.fn().mockResolvedValue(undefined), + withTransaction: vi.fn(), + initialize: vi.fn().mockResolvedValue(undefined), + close: vi.fn().mockResolvedValue(undefined), + isConnected: vi.fn().mockReturnValue(true), + getDialect: vi.fn().mockReturnValue("sqlite" as const), + getPlaceholder: vi.fn((_i: number) => "?"), + }; +} + +describe("JournalService", () => { + let db: DatabaseAdapter; + let service: JournalService; + + beforeEach(() => { + db = createMockDb(); + service = new JournalService(db); + }); + + describe("recordEvent", () => { + it("inserts a validated entry and returns an id", async () => { + const entry: CreateJournalEntry = { + nodeId: 
"node-1", + nodeUri: "proxmox:100", + eventType: "provision", + source: "proxmox", + action: "create_vm", + summary: "Created VM 100", + }; + + const id = await service.recordEvent(entry); + + expect(id).toBeTruthy(); + expect(typeof id).toBe("string"); + expect(db.execute).toHaveBeenCalledOnce(); + + const [sql, params] = (db.execute as ReturnType).mock.calls[0]; + expect(sql).toContain("INSERT INTO journal_entries"); + expect(params[1]).toBe("node-1"); + expect(params[2]).toBe("proxmox:100"); + expect(params[3]).toBe("provision"); + expect(params[4]).toBe("proxmox"); + }); + + it("includes userId and details when provided", async () => { + const entry: CreateJournalEntry = { + nodeId: "node-2", + nodeUri: "aws:us-east-1:i-abc", + eventType: "start", + source: "aws", + action: "start_instance", + summary: "Started instance", + details: { instanceId: "i-abc" }, + userId: "user-42", + }; + + await service.recordEvent(entry); + + const [, params] = (db.execute as ReturnType).mock.calls[0]; + expect(params[7]).toBe(JSON.stringify({ instanceId: "i-abc" })); + expect(params[8]).toBe("user-42"); + }); + + it("rejects invalid source", async () => { + const entry = { + nodeId: "node-1", + nodeUri: "bad:100", + eventType: "provision", + source: "invalid_source", + action: "create", + summary: "test", + }; + + await expect(service.recordEvent(entry as CreateJournalEntry)).rejects.toThrow(); + expect(db.execute).not.toHaveBeenCalled(); + }); + + it("rejects invalid eventType", async () => { + const entry = { + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "invalid_type", + source: "proxmox", + action: "create", + summary: "test", + }; + + await expect(service.recordEvent(entry as CreateJournalEntry)).rejects.toThrow(); + expect(db.execute).not.toHaveBeenCalled(); + }); + + it("rejects empty nodeId", async () => { + const entry = { + nodeId: "", + nodeUri: "proxmox:100", + eventType: "provision", + source: "proxmox", + action: "create", + summary: "test", + }; + + 
await expect(service.recordEvent(entry as CreateJournalEntry)).rejects.toThrow(); + }); + }); + + describe("addNote", () => { + it("creates a note entry with eventType 'note' and source 'user'", async () => { + const id = await service.addNote("node-1", "user-1", "Manual observation"); + + expect(id).toBeTruthy(); + expect(db.execute).toHaveBeenCalledOnce(); + + const [, params] = (db.execute as ReturnType).mock.calls[0]; + expect(params[1]).toBe("node-1"); + expect(params[2]).toBe("user:node-1"); + expect(params[3]).toBe("note"); + expect(params[4]).toBe("user"); + expect(params[5]).toBe("add_note"); + expect(params[6]).toBe("Manual observation"); + expect(params[8]).toBe("user-1"); + }); + }); + + describe("getNodeTimeline", () => { + it("queries with default pagination", async () => { + await service.getNodeTimeline("node-1"); + + expect(db.query).toHaveBeenCalledOnce(); + const [sql, params] = (db.query as ReturnType).mock.calls[0]; + expect(sql).toContain("WHERE nodeId = ?"); + expect(sql).toContain("ORDER BY timestamp DESC"); + expect(sql).toContain("LIMIT ? 
OFFSET ?"); + expect(params[0]).toBe("node-1"); + expect(params[1]).toBe(50); + expect(params[2]).toBe(0); + }); + + it("applies eventType and source filters", async () => { + await service.getNodeTimeline("node-1", { + eventType: "provision", + source: "proxmox", + limit: 10, + offset: 5, + }); + + const [sql, params] = (db.query as ReturnType).mock.calls[0]; + expect(sql).toContain("AND eventType = ?"); + expect(sql).toContain("AND source = ?"); + expect(params).toEqual(["node-1", "provision", "proxmox", 10, 5]); + }); + + it("applies date range filters", async () => { + await service.getNodeTimeline("node-1", { + startDate: "2024-01-01T00:00:00.000Z", + endDate: "2024-12-31T23:59:59.000Z", + }); + + const [sql, params] = (db.query as ReturnType).mock.calls[0]; + expect(sql).toContain("AND timestamp >= ?"); + expect(sql).toContain("AND timestamp <= ?"); + expect(params[1]).toBe("2024-01-01T00:00:00.000Z"); + expect(params[2]).toBe("2024-12-31T23:59:59.000Z"); + }); + + it("parses JSON details and sets isLive=false", async () => { + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "e1", + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "provision", + source: "proxmox", + action: "create_vm", + summary: "Created VM", + details: '{"vmid":100}', + userId: null, + timestamp: "2024-06-01T00:00:00.000Z", + }, + ]); + + const entries = await service.getNodeTimeline("node-1"); + + expect(entries).toHaveLength(1); + expect(entries[0].details).toEqual({ vmid: 100 }); + expect(entries[0].isLive).toBe(false); + }); + }); + + describe("searchEntries", () => { + it("searches summary and details with LIKE", async () => { + await service.searchEntries("provision"); + + expect(db.query).toHaveBeenCalledOnce(); + const [sql, params] = (db.query as ReturnType).mock.calls[0]; + expect(sql).toContain("WHERE summary LIKE ?"); + expect(sql).toContain("OR details LIKE ?"); + expect(params[0]).toBe("%provision%"); + expect(params[1]).toBe("%provision%"); + 
expect(params[2]).toBe(50); + expect(params[3]).toBe(0); + }); + + it("applies custom pagination", async () => { + await service.searchEntries("error", { limit: 20, offset: 10 }); + + const [, params] = (db.query as ReturnType).mock.calls[0]; + expect(params[2]).toBe(20); + expect(params[3]).toBe(10); + }); + + it("sets isLive=false on search results", async () => { + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "e1", + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "note", + source: "user", + action: "add_note", + summary: "Found an issue", + details: "{}", + userId: "user-1", + timestamp: "2024-06-01T00:00:00.000Z", + }, + ]); + + const results = await service.searchEntries("issue"); + + expect(results).toHaveLength(1); + expect(results[0].isLive).toBe(false); + expect(results[0].details).toEqual({}); + }); + }); + + describe("aggregateTimeline", () => { + function createMockLiveSource( + events: unknown[] = [], + initialized = true + ): LiveSource { + return { + getNodeData: vi.fn().mockResolvedValue(events), + isInitialized: vi.fn().mockReturnValue(initialized), + }; + } + + it("returns DB events with isLive=false when no live sources", async () => { + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "db-1", + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "provision", + source: "proxmox", + action: "create_vm", + summary: "Created VM", + details: "{}", + userId: null, + timestamp: "2024-06-01T00:00:00.000Z", + }, + ]); + + const entries = await service.aggregateTimeline("node-1"); + + expect(entries).toHaveLength(1); + expect(entries[0].isLive).toBe(false); + expect(entries[0].id).toBe("db-1"); + }); + + it("merges DB and live events, marking isLive correctly", async () => { + const liveSources = new Map(); + liveSources.set( + "puppetdb", + createMockLiveSource([ + { + id: "live-1", + nodeId: "node-1", + nodeUri: "puppetdb:node-1", + eventType: "puppet_run", + action: "apply", + summary: "Puppet run completed", + 
timestamp: "2024-06-02T00:00:00.000Z", + }, + ]) + ); + + const svcWithLive = new JournalService(db, liveSources); + + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "db-1", + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "provision", + source: "proxmox", + action: "create_vm", + summary: "Created VM", + details: "{}", + userId: null, + timestamp: "2024-06-01T00:00:00.000Z", + }, + ]); + + const entries = await svcWithLive.aggregateTimeline("node-1"); + + expect(entries).toHaveLength(2); + // Live event is newer, should come first (descending) + expect(entries[0].isLive).toBe(true); + expect(entries[0].id).toBe("live-1"); + expect(entries[0].source).toBe("puppetdb"); + expect(entries[1].isLive).toBe(false); + expect(entries[1].id).toBe("db-1"); + }); + + it("sorts merged results by timestamp descending", async () => { + const liveSources = new Map(); + liveSources.set( + "puppetdb", + createMockLiveSource([ + { + id: "live-old", + nodeId: "node-1", + nodeUri: "puppetdb:node-1", + eventType: "info", + action: "report", + summary: "Old live event", + timestamp: "2024-01-01T00:00:00.000Z", + }, + { + id: "live-new", + nodeId: "node-1", + nodeUri: "puppetdb:node-1", + eventType: "info", + action: "report", + summary: "New live event", + timestamp: "2024-12-01T00:00:00.000Z", + }, + ]) + ); + + const svcWithLive = new JournalService(db, liveSources); + + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "db-mid", + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "start", + source: "proxmox", + action: "start_vm", + summary: "Started VM", + details: "{}", + userId: null, + timestamp: "2024-06-15T00:00:00.000Z", + }, + ]); + + const entries = await svcWithLive.aggregateTimeline("node-1"); + + expect(entries).toHaveLength(3); + expect(entries[0].id).toBe("live-new"); + expect(entries[1].id).toBe("db-mid"); + expect(entries[2].id).toBe("live-old"); + }); + + it("applies limit/offset pagination to merged results", async () => { + 
const liveSources = new Map(); + liveSources.set( + "puppetdb", + createMockLiveSource([ + { + id: "live-1", + nodeId: "node-1", + nodeUri: "puppetdb:node-1", + eventType: "info", + action: "report", + summary: "Live event", + timestamp: "2024-06-03T00:00:00.000Z", + }, + ]) + ); + + const svcWithLive = new JournalService(db, liveSources); + + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "db-1", + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "provision", + source: "proxmox", + action: "create_vm", + summary: "Created VM", + details: "{}", + userId: null, + timestamp: "2024-06-02T00:00:00.000Z", + }, + { + id: "db-2", + nodeId: "node-1", + nodeUri: "proxmox:101", + eventType: "start", + source: "proxmox", + action: "start_vm", + summary: "Started VM", + details: "{}", + userId: null, + timestamp: "2024-06-01T00:00:00.000Z", + }, + ]); + + const entries = await svcWithLive.aggregateTimeline("node-1", { + limit: 2, + offset: 1, + }); + + expect(entries).toHaveLength(2); + // Skipped first (live-1), got db-1 and db-2 + expect(entries[0].id).toBe("db-1"); + expect(entries[1].id).toBe("db-2"); + }); + + it("gracefully skips failed live sources", async () => { + const liveSources = new Map(); + liveSources.set("puppetdb", { + getNodeData: vi.fn().mockRejectedValue(new Error("Connection refused")), + isInitialized: vi.fn().mockReturnValue(true), + }); + + const svcWithLive = new JournalService(db, liveSources); + + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "db-1", + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "provision", + source: "proxmox", + action: "create_vm", + summary: "Created VM", + details: "{}", + userId: null, + timestamp: "2024-06-01T00:00:00.000Z", + }, + ]); + + const entries = await svcWithLive.aggregateTimeline("node-1"); + + expect(entries).toHaveLength(1); + expect(entries[0].isLive).toBe(false); + expect(entries[0].id).toBe("db-1"); + }); + + it("skips uninitialized live sources", async () => { + 
const liveSources = new Map(); + liveSources.set("puppetdb", createMockLiveSource([], false)); + + const svcWithLive = new JournalService(db, liveSources); + + (db.query as ReturnType).mockResolvedValueOnce([]); + + const entries = await svcWithLive.aggregateTimeline("node-1"); + + expect(entries).toHaveLength(0); + expect(liveSources.get("puppetdb")!.getNodeData).not.toHaveBeenCalled(); + }); + + it("handles multiple live sources with partial failures", async () => { + const liveSources = new Map(); + liveSources.set("puppetdb", { + getNodeData: vi.fn().mockRejectedValue(new Error("timeout")), + isInitialized: vi.fn().mockReturnValue(true), + }); + liveSources.set( + "aws", + createMockLiveSource([ + { + id: "aws-1", + nodeId: "node-1", + nodeUri: "aws:i-123", + eventType: "info", + action: "status_check", + summary: "AWS status", + timestamp: "2024-06-05T00:00:00.000Z", + }, + ]) + ); + + const svcWithLive = new JournalService(db, liveSources); + + (db.query as ReturnType).mockResolvedValueOnce([ + { + id: "db-1", + nodeId: "node-1", + nodeUri: "proxmox:100", + eventType: "provision", + source: "proxmox", + action: "create_vm", + summary: "Created VM", + details: "{}", + userId: null, + timestamp: "2024-06-01T00:00:00.000Z", + }, + ]); + + const entries = await svcWithLive.aggregateTimeline("node-1"); + + expect(entries).toHaveLength(2); + // AWS live event is newer + expect(entries[0].isLive).toBe(true); + expect(entries[0].source).toBe("aws"); + expect(entries[1].isLive).toBe(false); + }); + }); +}); diff --git a/backend/test/services/PermissionService.test.ts b/backend/test/services/PermissionService.test.ts index 1f7d6f74..926d0bae 100644 --- a/backend/test/services/PermissionService.test.ts +++ b/backend/test/services/PermissionService.test.ts @@ -1,11 +1,12 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { Database } from 'sqlite3'; import { PermissionService, CreatePermissionDTO } from 
'../../src/services/PermissionService'; +import { SQLiteAdapter } from '../../src/database/SQLiteAdapter'; +import type { DatabaseAdapter } from '../../src/database/DatabaseAdapter'; import { promises as fs } from 'fs'; import path from 'path'; describe('PermissionService', () => { - let db: Database; + let db: DatabaseAdapter; let permissionService: PermissionService; const testDbPath = path.join(__dirname, '../../test-permission-service.db'); @@ -17,39 +18,29 @@ describe('PermissionService', () => { // Ignore if file doesn't exist } - // Create new database - db = new Database(testDbPath); + // Create new database using SQLiteAdapter + db = new SQLiteAdapter(testDbPath); + await db.initialize(); // Create permissions table - await new Promise((resolve, reject) => { - db.exec( - ` - CREATE TABLE permissions ( - id TEXT PRIMARY KEY, - resource TEXT NOT NULL, - action TEXT NOT NULL, - description TEXT NOT NULL DEFAULT '', - createdAt TEXT NOT NULL, - UNIQUE(resource, action) - ); - - CREATE INDEX idx_permissions_resource_action ON permissions(resource, action); - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute(` + CREATE TABLE permissions ( + id TEXT PRIMARY KEY, + resource TEXT NOT NULL, + action TEXT NOT NULL, + description TEXT NOT NULL DEFAULT '', + createdAt TEXT NOT NULL, + UNIQUE(resource, action) + ) + `); + await db.execute(`CREATE INDEX idx_permissions_resource_action ON permissions(resource, action)`); permissionService = new PermissionService(db); }); afterEach(async () => { // Close database - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); // Remove test database try { @@ -467,91 +458,21 @@ describe('PermissionService', () => { beforeEach(async () => { // Create full RBAC schema for permission checking tests - await new Promise((resolve, reject) => { - db.exec( - ` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT NOT NULL UNIQUE, - email TEXT NOT NULL 
UNIQUE, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER NOT NULL DEFAULT 1, - isAdmin INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - isBuiltIn INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE - ); - - CREATE TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE role_permissions ( - roleId TEXT NOT NULL, - permissionId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (roleId, permissionId), - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, - FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE - ); - - CREATE INDEX idx_user_roles_user ON user_roles(userId); - CREATE INDEX idx_user_roles_role ON user_roles(roleId); - CREATE INDEX idx_user_groups_user ON user_groups(userId); - CREATE INDEX idx_user_groups_group ON user_groups(groupId); - CREATE INDEX 
idx_group_roles_group ON group_roles(groupId); - CREATE INDEX idx_group_roles_role ON group_roles(roleId); - CREATE INDEX idx_role_permissions_role ON role_permissions(roleId); - CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId); - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute(`CREATE TABLE users ( id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, email TEXT NOT NULL UNIQUE, passwordHash TEXT NOT NULL, firstName TEXT NOT NULL, lastName TEXT NOT NULL, isActive INTEGER NOT NULL DEFAULT 1, isAdmin INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL, lastLoginAt TEXT )`); +await db.execute(`CREATE TABLE groups ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); +await db.execute(`CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, isBuiltIn INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); +await db.execute(`CREATE TABLE user_groups ( userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, groupId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE user_roles ( userId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, roleId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE group_roles ( groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (groupId, roleId), FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE role_permissions ( roleId TEXT NOT NULL, permissionId TEXT NOT NULL, assignedAt TEXT NOT NULL, 
PRIMARY KEY (roleId, permissionId), FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE )`); +await db.execute(`CREATE INDEX idx_user_roles_user ON user_roles(userId)`); +await db.execute(`CREATE INDEX idx_user_roles_role ON user_roles(roleId)`); +await db.execute(`CREATE INDEX idx_user_groups_user ON user_groups(userId)`); +await db.execute(`CREATE INDEX idx_user_groups_group ON user_groups(groupId)`); +await db.execute(`CREATE INDEX idx_group_roles_group ON group_roles(groupId)`); +await db.execute(`CREATE INDEX idx_group_roles_role ON group_roles(roleId)`); +await db.execute(`CREATE INDEX idx_role_permissions_role ON role_permissions(roleId)`); +await db.execute(`CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId)`); // Create test data const now = new Date().toISOString(); @@ -561,83 +482,47 @@ describe('PermissionService', () => { adminUserId = 'admin-456'; // pragma: allowlist secret inactiveUserId = 'inactive-789'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [userId, 'testuser', 'test@example.com', 'hash', 'Test', 'User', 1, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [userId, 'testuser', 'test@example.com', 'hash', 'Test', 'User', 1, 0, now, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, 
?, ?, ?)`, - [adminUserId, 'admin', 'admin@example.com', 'hash', 'Admin', 'User', 1, 1, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [adminUserId, 'admin', 'admin@example.com', 'hash', 'Admin', 'User', 1, 1, now, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [inactiveUserId, 'inactive', 'inactive@example.com', 'hash', 'Inactive', 'User', 0, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [inactiveUserId, 'inactive', 'inactive@example.com', 'hash', 'Inactive', 'User', 0, 0, now, now] +); // Create group groupId = 'group-123'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO groups (id, name, description, createdAt, updatedAt) + await db.execute( + `INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)`, - [groupId, 'Test Group', 'Test group description', now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [groupId, 'Test Group', 'Test group description', now, now] +); // Create roles roleId = 'role-123'; // pragma: allowlist secret groupRoleId = 'role-456'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [roleId, 'Test Role', 'Test role description', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [roleId, 'Test Role', 'Test role description', 0, now, now] +); - await new Promise((resolve, reject) => 
{ - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [groupRoleId, 'Group Role', 'Group role description', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [groupRoleId, 'Group Role', 'Group role description', 0, now, now] +); // Create permission const permission = await permissionService.createPermission({ @@ -656,27 +541,15 @@ describe('PermissionService', () => { it('should return false for inactive user (Requirement 5.6)', async () => { // Assign role and permission to inactive user const now = new Date().toISOString(); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [inactiveUserId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [inactiveUserId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permissionId, now] +); const hasAccess = await permissionService.hasPermission(inactiveUserId, 'ansible', 'read'); expect(hasAccess).toBe(false); @@ -696,28 +569,16 @@ describe('PermissionService', () => { const now = new Date().toISOString(); // Assign role to user - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, 
roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); // Assign permission to role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permissionId, now] +); const hasAccess = await permissionService.hasPermission(userId, 'ansible', 'read'); expect(hasAccess).toBe(true); @@ -727,40 +588,22 @@ describe('PermissionService', () => { const now = new Date().toISOString(); // Add user to group - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, groupId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, groupId, now] +); // Assign role to group - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [groupId, groupRoleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [groupId, groupRoleId, now] +); // Assign permission to role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [groupRoleId, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [groupRoleId, permissionId, now] +); const hasAccess = await permissionService.hasPermission(userId, 'ansible', 'read'); expect(hasAccess).toBe(true); @@ -770,61 +613,31 @@ 
describe('PermissionService', () => { const now = new Date().toISOString(); // Path 1: Direct role assignment - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permissionId, now] +); // Path 2: Group role assignment - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, groupId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, groupId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [groupId, groupRoleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [groupId, groupRoleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [groupRoleId, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [groupRoleId, permissionId, now] +); const hasAccess = await 
permissionService.hasPermission(userId, 'ansible', 'read'); expect(hasAccess).toBe(true); @@ -839,16 +652,10 @@ describe('PermissionService', () => { const now = new Date().toISOString(); // Assign role to user but don't assign permission to role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); const hasAccess = await permissionService.hasPermission(userId, 'ansible', 'read'); expect(hasAccess).toBe(false); @@ -858,27 +665,15 @@ describe('PermissionService', () => { const now = new Date().toISOString(); // Assign ansible:read permission - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permissionId, now] +); // Check for different resource const hasAccess = await permissionService.hasPermission(userId, 'bolt', 'read'); @@ -889,27 +684,15 @@ describe('PermissionService', () => { const now = new Date().toISOString(); // Assign ansible:read permission - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - 
else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permissionId, now] +); // Check for different action const hasAccess = await permissionService.hasPermission(userId, 'ansible', 'write'); @@ -923,87 +706,45 @@ describe('PermissionService', () => { const group2Id = 'group-456'; // pragma: allowlist secret const role2Id = 'role-789'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO groups (id, name, description, createdAt, updatedAt) + await db.execute( + `INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)`, - [group2Id, 'Test Group 2', 'Second test group', now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [group2Id, 'Test Group 2', 'Second test group', now, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [role2Id, 'Test Role 2', 'Second test role', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [role2Id, 'Test Role 2', 'Second test role', 0, now, now] +); // Add user to both groups - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, groupId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO 
user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, groupId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, group2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, group2Id, now] +); // Assign roles to groups - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [groupId, groupRoleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [groupId, groupRoleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [group2Id, role2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [group2Id, role2Id, now] +); // Assign permission to second role only - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [role2Id, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [role2Id, permissionId, now] +); const hasAccess = await permissionService.hasPermission(userId, 'ansible', 'read'); expect(hasAccess).toBe(true); @@ -1015,52 +756,28 @@ describe('PermissionService', () => { // Create second role const role2Id = 'role-789'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await 
db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [role2Id, 'Test Role 2', 'Second test role', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [role2Id, 'Test Role 2', 'Second test role', 0, now, now] +); // Assign both roles to user - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, role2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, role2Id, now] +); // Assign permission to second role only - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [role2Id, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [role2Id, permissionId, now] +); const hasAccess = await permissionService.hasPermission(userId, 'ansible', 'read'); expect(hasAccess).toBe(true); @@ -1069,7 +786,7 @@ describe('PermissionService', () => { }); describe('PermissionService - getUserPermissions', () => { - let db: Database; + let db: DatabaseAdapter; let permissionService: PermissionService; const testDbPath = path.join(__dirname, '../../test-permission-service-get-user-perms.db'); let userId: string; @@ -1091,105 +808,27 @@ describe('PermissionService - getUserPermissions', () => { } // Create new database 
- db = new Database(testDbPath); + db = new SQLiteAdapter(testDbPath); + await db.initialize(); // Create full RBAC schema for getUserPermissions tests - await new Promise((resolve, reject) => { - db.exec( - ` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT NOT NULL UNIQUE, - email TEXT NOT NULL UNIQUE, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER NOT NULL DEFAULT 1, - isAdmin INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - isBuiltIn INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE - ); - - CREATE TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE role_permissions ( - roleId TEXT NOT NULL, - permissionId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (roleId, permissionId), - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, - FOREIGN KEY (permissionId) REFERENCES 
permissions(id) ON DELETE CASCADE - ); - - CREATE INDEX idx_user_roles_user ON user_roles(userId); - CREATE INDEX idx_user_roles_role ON user_roles(roleId); - CREATE INDEX idx_user_groups_user ON user_groups(userId); - CREATE INDEX idx_user_groups_group ON user_groups(groupId); - CREATE INDEX idx_group_roles_group ON group_roles(groupId); - CREATE INDEX idx_group_roles_role ON group_roles(roleId); - CREATE INDEX idx_role_permissions_role ON role_permissions(roleId); - CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId); - - CREATE TABLE permissions ( - id TEXT PRIMARY KEY, - resource TEXT NOT NULL, - action TEXT NOT NULL, - description TEXT NOT NULL DEFAULT '', - createdAt TEXT NOT NULL, - UNIQUE(resource, action) - ); - - CREATE INDEX idx_permissions_resource_action ON permissions(resource, action); - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute(`CREATE TABLE users ( id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, email TEXT NOT NULL UNIQUE, passwordHash TEXT NOT NULL, firstName TEXT NOT NULL, lastName TEXT NOT NULL, isActive INTEGER NOT NULL DEFAULT 1, isAdmin INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL, lastLoginAt TEXT )`); +await db.execute(`CREATE TABLE groups ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); +await db.execute(`CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, isBuiltIn INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); +await db.execute(`CREATE TABLE user_groups ( userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, groupId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE user_roles ( userId TEXT NOT NULL, roleId TEXT NOT NULL, 
assignedAt TEXT NOT NULL, PRIMARY KEY (userId, roleId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE group_roles ( groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (groupId, roleId), FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE role_permissions ( roleId TEXT NOT NULL, permissionId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (roleId, permissionId), FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE )`); +await db.execute(`CREATE INDEX idx_user_roles_user ON user_roles(userId)`); +await db.execute(`CREATE INDEX idx_user_roles_role ON user_roles(roleId)`); +await db.execute(`CREATE INDEX idx_user_groups_user ON user_groups(userId)`); +await db.execute(`CREATE INDEX idx_user_groups_group ON user_groups(groupId)`); +await db.execute(`CREATE INDEX idx_group_roles_group ON group_roles(groupId)`); +await db.execute(`CREATE INDEX idx_group_roles_role ON group_roles(roleId)`); +await db.execute(`CREATE INDEX idx_role_permissions_role ON role_permissions(roleId)`); +await db.execute(`CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId)`); +await db.execute(`CREATE TABLE permissions ( id TEXT PRIMARY KEY, resource TEXT NOT NULL, action TEXT NOT NULL, description TEXT NOT NULL DEFAULT '', createdAt TEXT NOT NULL, UNIQUE(resource, action) )`); +await db.execute(`CREATE INDEX idx_permissions_resource_action ON permissions(resource, action)`); // Initialize permission service permissionService = new PermissionService(db); @@ -1202,83 +841,47 @@ describe('PermissionService - getUserPermissions', () => { adminUserId = 'admin-456'; // pragma: allowlist secret inactiveUserId = 'inactive-789'; // pragma: allowlist 
secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [userId, 'testuser', 'test@example.com', 'hash', 'Test', 'User', 1, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [userId, 'testuser', 'test@example.com', 'hash', 'Test', 'User', 1, 0, now, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [adminUserId, 'admin', 'admin@example.com', 'hash', 'Admin', 'User', 1, 1, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [adminUserId, 'admin', 'admin@example.com', 'hash', 'Admin', 'User', 1, 1, now, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [inactiveUserId, 'inactive', 'inactive@example.com', 'hash', 'Inactive', 'User', 0, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [inactiveUserId, 'inactive', 'inactive@example.com', 'hash', 'Inactive', 'User', 0, 0, now, now] +); // Create group groupId = 'group-123'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO groups (id, name, description, createdAt, 
updatedAt) + await db.execute( + `INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)`, - [groupId, 'Test Group', 'Test group description', now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [groupId, 'Test Group', 'Test group description', now, now] +); // Create roles roleId = 'role-123'; // pragma: allowlist secret groupRoleId = 'role-456'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [roleId, 'Test Role', 'Test role description', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [roleId, 'Test Role', 'Test role description', 0, now, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [groupRoleId, 'Group Role', 'Group role description', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [groupRoleId, 'Group Role', 'Group role description', 0, now, now] +); // Create permissions const perm1 = await permissionService.createPermission({ @@ -1311,27 +914,15 @@ describe('PermissionService - getUserPermissions', () => { it('should return empty array for inactive user (Requirement 8.6)', async () => { // Assign role and permission to inactive user const now = new Date().toISOString(); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [inactiveUserId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, 
assignedAt) VALUES (?, ?, ?)`, + [inactiveUserId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission1Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission1Id, now] +); const permissions = await permissionService.getUserPermissions(inactiveUserId); expect(permissions).toEqual([]); @@ -1352,39 +943,21 @@ describe('PermissionService - getUserPermissions', () => { const now = new Date().toISOString(); // Assign role to user - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); // Assign permissions to role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission1Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission1Id, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission2Id, now] +); const permissions = await permissionService.getUserPermissions(userId); @@ -1399,40 +972,22 @@ describe('PermissionService - getUserPermissions', () => { 
const now = new Date().toISOString(); // Add user to group - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, groupId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, groupId, now] +); // Assign role to group - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [groupId, groupRoleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [groupId, groupRoleId, now] +); // Assign permission to role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [groupRoleId, permission3Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [groupRoleId, permission3Id, now] +); const permissions = await permissionService.getUserPermissions(userId); @@ -1445,61 +1000,31 @@ describe('PermissionService - getUserPermissions', () => { const now = new Date().toISOString(); // Path 1: Direct role assignment with permission1 - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission1Id, now], - (err) => { - if (err) 
reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission1Id, now] +); // Path 2: Group role assignment with same permission1 - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, groupId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, groupId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [groupId, groupRoleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [groupId, groupRoleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [groupRoleId, permission1Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [groupRoleId, permission1Id, now] +); const permissions = await permissionService.getUserPermissions(userId); @@ -1513,72 +1038,36 @@ describe('PermissionService - getUserPermissions', () => { const now = new Date().toISOString(); // Direct role with permission1 and permission2 - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO 
role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission1Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission1Id, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission2Id, now] +); // Group role with permission3 - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, groupId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, groupId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [groupId, groupRoleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [groupId, groupRoleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [groupRoleId, permission3Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [groupRoleId, permission3Id, now] +); const permissions = await permissionService.getUserPermissions(userId); @@ -1607,61 +1096,31 @@ describe('PermissionService - getUserPermissions', () => { }); // Assign 
role to user - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); // Assign permissions in non-alphabetical order - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission3Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); - - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission1Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); - - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, perm5.id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); - - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, perm4.id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission3Id, now] +); + + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission1Id, now] +); + + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, perm5.id, now] +); + + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, perm4.id, now] +); const permissions = await permissionService.getUserPermissions(userId); @@ 
-1684,16 +1143,10 @@ describe('PermissionService - getUserPermissions', () => { const now = new Date().toISOString(); // Assign role to user but don't assign any permissions to role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); const permissions = await permissionService.getUserPermissions(userId); expect(permissions).toEqual([]); @@ -1706,98 +1159,50 @@ describe('PermissionService - getUserPermissions', () => { const group2Id = 'group-456'; // pragma: allowlist secret const role2Id = 'role-789'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO groups (id, name, description, createdAt, updatedAt) + await db.execute( + `INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)`, - [group2Id, 'Test Group 2', 'Second test group', now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [group2Id, 'Test Group 2', 'Second test group', now, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [role2Id, 'Test Role 2', 'Second test role', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [role2Id, 'Test Role 2', 'Second test role', 0, now, now] +); // Add user to both groups - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, groupId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO 
user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, groupId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, - [userId, group2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, + [userId, group2Id, now] +); // Assign roles to groups - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [groupId, groupRoleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [groupId, groupRoleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, - [group2Id, role2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, + [group2Id, role2Id, now] +); // Assign different permissions to each role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [groupRoleId, permission1Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [groupRoleId, permission1Id, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [role2Id, permission2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [role2Id, permission2Id, now] 
+); const permissions = await permissionService.getUserPermissions(userId); @@ -1814,63 +1219,33 @@ describe('PermissionService - getUserPermissions', () => { // Create second role const role2Id = 'role-789'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [role2Id, 'Test Role 2', 'Second test role', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [role2Id, 'Test Role 2', 'Second test role', 0, now, now] +); // Assign both roles to user - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, role2Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, role2Id, now] +); // Assign different permissions to each role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permission1Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permission1Id, now] +); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [role2Id, 
permission3Id, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [role2Id, permission3Id, now] +); const permissions = await permissionService.getUserPermissions(userId); @@ -1883,9 +1258,7 @@ describe('PermissionService - getUserPermissions', () => { afterEach(async () => { // Close database - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); // Remove test database try { @@ -1897,7 +1270,7 @@ describe('PermissionService - getUserPermissions', () => { }); describe('PermissionService - Permission Caching', () => { - let db: Database; + let db: DatabaseAdapter; let permissionService: PermissionService; const testDbPath = path.join(__dirname, '../../test-permission-service-caching.db'); let userId: string; @@ -1913,104 +1286,27 @@ describe('PermissionService - Permission Caching', () => { } // Create new database - db = new Database(testDbPath); + db = new SQLiteAdapter(testDbPath); + await db.initialize(); // Create full RBAC schema - await new Promise((resolve, reject) => { - db.exec( - ` - CREATE TABLE users ( - id TEXT PRIMARY KEY, - username TEXT NOT NULL UNIQUE, - email TEXT NOT NULL UNIQUE, - passwordHash TEXT NOT NULL, - firstName TEXT NOT NULL, - lastName TEXT NOT NULL, - isActive INTEGER NOT NULL DEFAULT 1, - isAdmin INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL, - lastLoginAt TEXT - ); - - CREATE TABLE groups ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE roles ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - isBuiltIn INTEGER NOT NULL DEFAULT 0, - createdAt TEXT NOT NULL, - updatedAt TEXT NOT NULL - ); - - CREATE TABLE user_groups ( - userId TEXT NOT NULL, - groupId TEXT NOT NULL, - assignedAt TEXT NOT NULL, 
- PRIMARY KEY (userId, groupId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE - ); - - CREATE TABLE user_roles ( - userId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (userId, roleId), - FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE group_roles ( - groupId TEXT NOT NULL, - roleId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (groupId, roleId), - FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE - ); - - CREATE TABLE role_permissions ( - roleId TEXT NOT NULL, - permissionId TEXT NOT NULL, - assignedAt TEXT NOT NULL, - PRIMARY KEY (roleId, permissionId), - FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, - FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE - ); - - CREATE TABLE permissions ( - id TEXT PRIMARY KEY, - resource TEXT NOT NULL, - action TEXT NOT NULL, - description TEXT NOT NULL DEFAULT '', - createdAt TEXT NOT NULL, - UNIQUE(resource, action) - ); - - CREATE INDEX idx_permissions_resource_action ON permissions(resource, action); - CREATE INDEX idx_user_roles_user ON user_roles(userId); - CREATE INDEX idx_user_roles_role ON user_roles(roleId); - CREATE INDEX idx_user_groups_user ON user_groups(userId); - CREATE INDEX idx_user_groups_group ON user_groups(groupId); - CREATE INDEX idx_group_roles_group ON group_roles(groupId); - CREATE INDEX idx_group_roles_role ON group_roles(roleId); - CREATE INDEX idx_role_permissions_role ON role_permissions(roleId); - CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId); - `, - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute(`CREATE TABLE users ( id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, email TEXT NOT NULL UNIQUE, 
passwordHash TEXT NOT NULL, firstName TEXT NOT NULL, lastName TEXT NOT NULL, isActive INTEGER NOT NULL DEFAULT 1, isAdmin INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL, lastLoginAt TEXT )`); +await db.execute(`CREATE TABLE groups ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); +await db.execute(`CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, isBuiltIn INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); +await db.execute(`CREATE TABLE user_groups ( userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, groupId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE user_roles ( userId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, roleId), FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE group_roles ( groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (groupId, roleId), FOREIGN KEY (groupId) REFERENCES groups(id) ON DELETE CASCADE, FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE role_permissions ( roleId TEXT NOT NULL, permissionId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (roleId, permissionId), FOREIGN KEY (roleId) REFERENCES roles(id) ON DELETE CASCADE, FOREIGN KEY (permissionId) REFERENCES permissions(id) ON DELETE CASCADE )`); +await db.execute(`CREATE TABLE permissions ( id TEXT PRIMARY KEY, resource TEXT NOT NULL, action TEXT NOT NULL, description TEXT NOT NULL DEFAULT '', createdAt TEXT NOT NULL, UNIQUE(resource, action) )`); +await db.execute(`CREATE INDEX idx_permissions_resource_action ON 
permissions(resource, action)`); +await db.execute(`CREATE INDEX idx_user_roles_user ON user_roles(userId)`); +await db.execute(`CREATE INDEX idx_user_roles_role ON user_roles(roleId)`); +await db.execute(`CREATE INDEX idx_user_groups_user ON user_groups(userId)`); +await db.execute(`CREATE INDEX idx_user_groups_group ON user_groups(groupId)`); +await db.execute(`CREATE INDEX idx_group_roles_group ON group_roles(groupId)`); +await db.execute(`CREATE INDEX idx_group_roles_role ON group_roles(roleId)`); +await db.execute(`CREATE INDEX idx_role_permissions_role ON role_permissions(roleId)`); +await db.execute(`CREATE INDEX idx_role_permissions_perm ON role_permissions(permissionId)`); // Initialize permission service permissionService = new PermissionService(db); @@ -2020,31 +1316,19 @@ describe('PermissionService - Permission Caching', () => { // Create user userId = 'user-cache-123'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [userId, 'cacheuser', 'cache@example.com', 'hash', 'Cache', 'User', 1, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [userId, 'cacheuser', 'cache@example.com', 'hash', 'Cache', 'User', 1, 0, now, now] +); // Create role roleId = 'role-cache-123'; // pragma: allowlist secret - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) + await db.execute( + `INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?)`, - [roleId, 'Cache Role', 'Cache test role', 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [roleId, 'Cache 
Role', 'Cache test role', 0, now, now] +); // Create permission const permission = await permissionService.createPermission({ @@ -2055,35 +1339,21 @@ describe('PermissionService - Permission Caching', () => { permissionId = permission.id; // Assign role to user - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [userId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [userId, roleId, now] +); // Assign permission to role - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, - [roleId, permissionId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, + [roleId, permissionId, now] +); }); afterEach(async () => { // Close database - await new Promise((resolve) => { - db.close(() => resolve()); - }); + await db.close(); // Remove test database try { @@ -2119,17 +1389,11 @@ describe('PermissionService - Permission Caching', () => { const adminId = 'admin-cache-123'; // pragma: allowlist secret const now = new Date().toISOString(); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [adminId, 'adminuser', 'admin@example.com', 'hash', 'Admin', 'User', 1, 1, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [adminId, 'adminuser', 'admin@example.com', 'hash', 'Admin', 'User', 1, 1, now, now] +); // First call const result1 = 
await permissionService.hasPermission(adminId, 'any_resource', 'any_action'); @@ -2145,17 +1409,11 @@ describe('PermissionService - Permission Caching', () => { const inactiveId = 'inactive-cache-123'; // pragma: allowlist secret const now = new Date().toISOString(); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [inactiveId, 'inactiveuser', 'inactive@example.com', 'hash', 'Inactive', 'User', 0, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [inactiveId, 'inactiveuser', 'inactive@example.com', 'hash', 'Inactive', 'User', 0, 0, now, now] +); // First call const result1 = await permissionService.hasPermission(inactiveId, 'ansible', 'read'); @@ -2171,17 +1429,11 @@ describe('PermissionService - Permission Caching', () => { const user2Id = 'user-cache-456'; // pragma: allowlist secret const now = new Date().toISOString(); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [user2Id, 'cacheuser2', 'cache2@example.com', 'hash', 'Cache2', 'User', 1, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [user2Id, 'cacheuser2', 'cache2@example.com', 'hash', 'Cache2', 'User', 1, 0, now, now] +); // Check permissions for both users const result1 = await permissionService.hasPermission(userId, 'ansible', 'read'); @@ -2231,17 +1483,11 @@ describe('PermissionService - Permission Caching', () => { const user2Id = 
'user-cache-789'; // pragma: allowlist secret const now = new Date().toISOString(); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [user2Id, 'cacheuser3', 'cache3@example.com', 'hash', 'Cache3', 'User', 1, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [user2Id, 'cacheuser3', 'cache3@example.com', 'hash', 'Cache3', 'User', 1, 0, now, now] +); // Cache permissions for both users await permissionService.hasPermission(userId, 'ansible', 'read'); @@ -2300,17 +1546,11 @@ describe('PermissionService - Permission Caching', () => { const adminId = 'admin-correct-123'; // pragma: allowlist secret const now = new Date().toISOString(); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [adminId, 'admincorrect', 'admincorrect@example.com', 'hash', 'Admin', 'Correct', 1, 1, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [adminId, 'admincorrect', 'admincorrect@example.com', 'hash', 'Admin', 'Correct', 1, 1, now, now] +); // Multiple checks should all return true const result1 = await permissionService.hasPermission(adminId, 'resource1', 'action1'); @@ -2327,29 +1567,17 @@ describe('PermissionService - Permission Caching', () => { const inactiveId = 'inactive-correct-123'; // pragma: allowlist secret const now = new Date().toISOString(); - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO users 
(id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + await db.execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [inactiveId, 'inactivecorrect', 'inactivecorrect@example.com', 'hash', 'Inactive', 'Correct', 0, 0, now, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + [inactiveId, 'inactivecorrect', 'inactivecorrect@example.com', 'hash', 'Inactive', 'Correct', 0, 0, now, now] +); // Assign role with permission - await new Promise((resolve, reject) => { - db.run( - `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, - [inactiveId, roleId, now], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await db.execute( + `INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, + [inactiveId, roleId, now] +); // Multiple checks should all return false (inactive users always denied) const result1 = await permissionService.hasPermission(inactiveId, 'ansible', 'read'); @@ -2399,3 +1627,223 @@ describe('PermissionService - Permission Caching', () => { }); }); }); + +describe('PermissionService - Role-level cache invalidation (Requirement 30.2)', () => { + let db: DatabaseAdapter; + let permissionService: PermissionService; + const testDbPath = path.join(__dirname, '../../test-permission-service-role-cache.db'); + let userId: string; + let user2Id: string; + let groupId: string; + let roleId: string; + let groupRoleId: string; + let permissionId: string; + let permission2Id: string; + + beforeEach(async () => { + try { await fs.unlink(testDbPath); } catch { /* ignore */ } + + db = new SQLiteAdapter(testDbPath); + await db.initialize(); + + // Create full RBAC schema + await db.execute(`CREATE TABLE users ( id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, email TEXT NOT NULL UNIQUE, passwordHash TEXT NOT NULL, firstName TEXT NOT 
NULL, lastName TEXT NOT NULL, isActive INTEGER NOT NULL DEFAULT 1, isAdmin INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL, lastLoginAt TEXT )`); + await db.execute(`CREATE TABLE groups ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`CREATE TABLE roles ( id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT NOT NULL, isBuiltIn INTEGER NOT NULL DEFAULT 0, createdAt TEXT NOT NULL, updatedAt TEXT NOT NULL )`); + await db.execute(`CREATE TABLE permissions ( id TEXT PRIMARY KEY, resource TEXT NOT NULL, action TEXT NOT NULL, description TEXT NOT NULL DEFAULT '', createdAt TEXT NOT NULL, UNIQUE(resource, action) )`); + await db.execute(`CREATE TABLE user_groups ( userId TEXT NOT NULL, groupId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, groupId) )`); + await db.execute(`CREATE TABLE user_roles ( userId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (userId, roleId) )`); + await db.execute(`CREATE TABLE group_roles ( groupId TEXT NOT NULL, roleId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (groupId, roleId) )`); + await db.execute(`CREATE TABLE role_permissions ( roleId TEXT NOT NULL, permissionId TEXT NOT NULL, assignedAt TEXT NOT NULL, PRIMARY KEY (roleId, permissionId) )`); + + permissionService = new PermissionService(db); + const now = new Date().toISOString(); + + // Users + userId = 'user-role-cache-1'; // pragma: allowlist secret + user2Id = 'user-role-cache-2'; // pragma: allowlist secret + await db.execute(`INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?, ?, 1, 0, ?, ?)`, [userId, 'rolecache1', 'rc1@example.com', 'hash', 'RC', 'One', now, now]); + await db.execute(`INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) 
VALUES (?, ?, ?, ?, ?, ?, 1, 0, ?, ?)`, [user2Id, 'rolecache2', 'rc2@example.com', 'hash', 'RC', 'Two', now, now]); + + // Group + groupId = 'group-role-cache-1'; // pragma: allowlist secret + await db.execute(`INSERT INTO groups (id, name, description, createdAt, updatedAt) VALUES (?, ?, ?, ?, ?)`, [groupId, 'RC Group', 'Role cache test group', now, now]); + + // Roles + roleId = 'role-cache-direct'; // pragma: allowlist secret + groupRoleId = 'role-cache-group'; // pragma: allowlist secret + await db.execute(`INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, 0, ?, ?)`, [roleId, 'Direct Role', 'Direct role', now, now]); + await db.execute(`INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, 0, ?, ?)`, [groupRoleId, 'Group Role', 'Group role', now, now]); + + // Permissions + const perm = await permissionService.createPermission({ resource: 'proxmox', action: 'execute', description: 'Execute Proxmox' }); + permissionId = perm.id; + const perm2 = await permissionService.createPermission({ resource: 'proxmox', action: 'provision', description: 'Provision Proxmox' }); + permission2Id = perm2.id; + + // Assign role directly to user1 + await db.execute(`INSERT INTO user_roles (userId, roleId, assignedAt) VALUES (?, ?, ?)`, [userId, roleId, now]); + // Assign permission to role + await db.execute(`INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, [roleId, permissionId, now]); + + // Assign user2 to group, group to groupRole + await db.execute(`INSERT INTO user_groups (userId, groupId, assignedAt) VALUES (?, ?, ?)`, [user2Id, groupId, now]); + await db.execute(`INSERT INTO group_roles (groupId, roleId, assignedAt) VALUES (?, ?, ?)`, [groupId, groupRoleId, now]); + await db.execute(`INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, [groupRoleId, permissionId, now]); + }); + + afterEach(async () => { + await db.close(); + try { 
await fs.unlink(testDbPath); } catch { /* ignore */ } + }); + + describe('invalidateRolePermissionCache', () => { + it('should invalidate cache for users with direct role assignment', async () => { + // Populate cache + const before = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + expect(before).toBe(true); + + // Invalidate by role + await permissionService.invalidateRolePermissionCache(roleId); + + // Cache should be cleared — next call hits DB again (still returns true since data unchanged) + const after = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + expect(after).toBe(true); + }); + + it('should invalidate cache for users with group-based role assignment', async () => { + // Populate cache for user2 (group path) + const before = await permissionService.hasPermission(user2Id, 'proxmox', 'execute'); + expect(before).toBe(true); + + // Invalidate by groupRole + await permissionService.invalidateRolePermissionCache(groupRoleId); + + // After invalidation, re-check still works + const after = await permissionService.hasPermission(user2Id, 'proxmox', 'execute'); + expect(after).toBe(true); + }); + + it('should reflect permission changes after role cache invalidation', async () => { + // Cache the result (true — user has proxmox:execute via role) + const before = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + expect(before).toBe(true); + + // Remove the permission from the role at DB level + await db.execute(`DELETE FROM role_permissions WHERE roleId = ? 
AND permissionId = ?`, [roleId, permissionId]); + + // Without invalidation, cache still returns true + const stale = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + expect(stale).toBe(true); // stale cache + + // Invalidate + await permissionService.invalidateRolePermissionCache(roleId); + + // Now should reflect the DB change + const after = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + expect(after).toBe(false); + }); + + it('should not affect users who do not have the role', async () => { + // Cache for both users + await permissionService.hasPermission(userId, 'proxmox', 'execute'); + await permissionService.hasPermission(user2Id, 'proxmox', 'execute'); + + // Invalidate only the direct role (affects user1 only) + await permissionService.invalidateRolePermissionCache(roleId); + + // user2's cache should remain intact (they use groupRoleId, not roleId) + // Both should still return correct results + const r1 = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + const r2 = await permissionService.hasPermission(user2Id, 'proxmox', 'execute'); + expect(r1).toBe(true); + expect(r2).toBe(true); + }); + + it('should handle role with no assigned users gracefully', async () => { + const now = new Date().toISOString(); + await db.execute(`INSERT INTO roles (id, name, description, isBuiltIn, createdAt, updatedAt) VALUES (?, ?, ?, 0, ?, ?)`, ['orphan-role', 'Orphan', 'No users', now, now]); + + // Should not throw + await expect(permissionService.invalidateRolePermissionCache('orphan-role')).resolves.not.toThrow(); + }); + }); + + describe('invalidateAllPermissionCache', () => { + it('should clear all cached permission entries', async () => { + // Populate cache for multiple users + await permissionService.hasPermission(userId, 'proxmox', 'execute'); + await permissionService.hasPermission(user2Id, 'proxmox', 'execute'); + + // Remove all role_permissions at DB level + await db.execute(`DELETE FROM 
role_permissions`); + + // Stale cache still returns true + const stale1 = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + expect(stale1).toBe(true); + + // Clear entire cache + permissionService.invalidateAllPermissionCache(); + + // Now should reflect DB state + const after1 = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + const after2 = await permissionService.hasPermission(user2Id, 'proxmox', 'execute'); + expect(after1).toBe(false); + expect(after2).toBe(false); + }); + + it('should handle empty cache gracefully', () => { + expect(() => permissionService.invalidateAllPermissionCache()).not.toThrow(); + }); + }); + + describe('New permission types in cache (Requirement 30.3)', () => { + it('should cache and return correct results for new action types', async () => { + const now = new Date().toISOString(); + + // Create new permission types + const provisionPerm = await permissionService.createPermission({ resource: 'aws', action: 'provision', description: 'Provision AWS' }); + const destroyPerm = await permissionService.createPermission({ resource: 'aws', action: 'destroy', description: 'Destroy AWS' }); + const lifecyclePerm = await permissionService.createPermission({ resource: 'aws', action: 'lifecycle', description: 'Lifecycle AWS' }); + const configurePerm = await permissionService.createPermission({ resource: 'integration_config', action: 'configure', description: 'Configure integrations' }); + const notePerm = await permissionService.createPermission({ resource: 'journal', action: 'note', description: 'Add notes' }); + + // Assign some to user's role + await db.execute(`INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, [roleId, provisionPerm.id, now]); + await db.execute(`INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, [roleId, lifecyclePerm.id, now]); + await db.execute(`INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES 
(?, ?, ?)`, [roleId, notePerm.id, now]); + + // Check permissions — first call populates cache, second uses cache + expect(await permissionService.hasPermission(userId, 'aws', 'provision')).toBe(true); + expect(await permissionService.hasPermission(userId, 'aws', 'provision')).toBe(true); // cached + expect(await permissionService.hasPermission(userId, 'aws', 'destroy')).toBe(false); + expect(await permissionService.hasPermission(userId, 'aws', 'lifecycle')).toBe(true); + expect(await permissionService.hasPermission(userId, 'integration_config', 'configure')).toBe(false); + expect(await permissionService.hasPermission(userId, 'journal', 'note')).toBe(true); + }); + }); + + describe('Backward compatibility (Requirement 29.1)', () => { + it('should continue to work for existing proxmox:execute permission checks', async () => { + // user1 has proxmox:execute via direct role + const result = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + expect(result).toBe(true); + + // Cached result + const cached = await permissionService.hasPermission(userId, 'proxmox', 'execute'); + expect(cached).toBe(true); + }); + + it('should work for existing permission types alongside new ones', async () => { + const now = new Date().toISOString(); + + // Add a new-style permission to the same role + await db.execute(`INSERT INTO role_permissions (roleId, permissionId, assignedAt) VALUES (?, ?, ?)`, [roleId, permission2Id, now]); + + // Old permission still works + expect(await permissionService.hasPermission(userId, 'proxmox', 'execute')).toBe(true); + // New permission also works + expect(await permissionService.hasPermission(userId, 'proxmox', 'provision')).toBe(true); + }); + }); +}); diff --git a/backend/test/unit/error-handling.test.ts b/backend/test/unit/error-handling.test.ts index ba444df5..6fe9f9b3 100644 --- a/backend/test/unit/error-handling.test.ts +++ b/backend/test/unit/error-handling.test.ts @@ -97,28 +97,22 @@ describe("Error Handling - Unit Tests", 
() => { it("should return 401 for inactive user account", async () => { // Create inactive user - await new Promise((resolve, reject) => { - databaseService.getConnection().run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - randomUUID(), - "inactiveuser", - "inactive@example.com", - "$2b$10$abcdefghijklmnopqrstuv", - "Inactive", - "User", - 0, // isActive = false - 0, - new Date().toISOString(), - new Date().toISOString(), - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await databaseService.getConnection().execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + [ + randomUUID(), + "inactiveuser", + "inactive@example.com", + "$2b$10$abcdefghijklmnopqrstuv", + "Inactive", + "User", + 0, // isActive = false + 0, + new Date().toISOString(), + new Date().toISOString(), + ] + ); const response = await request(app) .post("/api/auth/login") @@ -814,28 +808,22 @@ async function createTestUser(db: Database): Promise { const bcrypt = require("bcrypt"); const passwordHash = await bcrypt.hash("Password123!", 10); - return new Promise((resolve, reject) => { - databaseService.getConnection().run( - `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, - [ - "test-user-id", - "testuser", - "test@example.com", - passwordHash, - "Test", - "User", - 1, - 0, - new Date().toISOString(), - new Date().toISOString(), - ], - (err) => { - if (err) reject(err); - else resolve(); - } - ); - }); + await databaseService.getConnection().execute( + `INSERT INTO users (id, username, email, passwordHash, firstName, lastName, isActive, isAdmin, createdAt, updatedAt) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + [ + "test-user-id", + 
"testuser", + "test@example.com", + passwordHash, + "Test", + "User", + 1, + 0, + new Date().toISOString(), + new Date().toISOString(), + ] + ); } async function closeDatabase(db: Database): Promise { diff --git a/backend/vitest.config.ts b/backend/vitest.config.ts index 9a1863df..02945863 100644 --- a/backend/vitest.config.ts +++ b/backend/vitest.config.ts @@ -4,7 +4,7 @@ export default defineConfig({ test: { globals: true, environment: 'node', - include: ['test/**/*.test.ts', 'src/integrations/ssh/__tests__/**/*.test.ts', 'src/integrations/ansible/__tests__/**/*.test.ts'], + include: ['test/**/*.test.ts', 'src/integrations/ssh/__tests__/**/*.test.ts', 'src/integrations/ansible/__tests__/**/*.test.ts', 'src/integrations/proxmox/__tests__/**/*.test.ts', 'src/integrations/aws/__tests__/**/*.test.ts'], exclude: ['node_modules', 'dist'], env: { NODE_ENV: 'test', diff --git a/docs/configuration.md b/docs/configuration.md index f5bde0f3..3bbb7d95 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -211,6 +211,22 @@ Configure execution queue and concurrency limits. - Prevents unbounded queue growth - Should be set based on expected workload and acceptable wait times +### Provisioning Safety + +Control whether destructive provisioning actions are allowed globally. + +#### ALLOW_DESTRUCTIVE_PROVISIONING + +- **Type:** Boolean (`true` or `false`) +- **Default:** `false` (destructive actions are blocked unless explicitly enabled) +- **Description:** When set to `false`, blocks all destructive provisioning actions across every integration. This includes destroying Proxmox VMs/LXC containers and terminating AWS EC2 instances. Non-destructive lifecycle actions (start, stop, shutdown, reboot) remain unaffected. 
+- **Example:** `ALLOW_DESTRUCTIVE_PROVISIONING=false` +- **Notes:** + - This is a global safety switch — it applies to all current and future provisioning integrations + - When disabled, the API returns `403 Forbidden` with error code `DESTRUCTIVE_ACTION_DISABLED` + - Useful for production environments where accidental resource deletion must be prevented + - Does not affect resource creation (provisioning new VMs/containers is still allowed) + ### UI Configuration Configure user interface features and behavior. @@ -570,5 +586,6 @@ Before deploying to production: - [ ] Streaming limits configured - [ ] Log level appropriate for environment - [ ] Expert mode disabled in production (or restricted) +- [ ] Destructive provisioning disabled if appropriate (`ALLOW_DESTRUCTIVE_PROVISIONING=false`) - [ ] UI features configured (run chart visibility) - [ ] Integration colors displaying correctly diff --git a/docs/development/BACKEND_CODE_ANALYSIS.md b/docs/development/BACKEND_CODE_ANALYSIS.md index f86d3206..3f0245d3 100644 --- a/docs/development/BACKEND_CODE_ANALYSIS.md +++ b/docs/development/BACKEND_CODE_ANALYSIS.md @@ -378,14 +378,22 @@ The Pabawi backend is a Node.js/Express/TypeScript infrastructure management sys - `getConnection()` - Get SQLite connection - `close()` - Close connection - `isInitialized()` - Check if DB is ready -- `initializeSchema()` - Create tables from schema.sql -- `runMigrations()` - Apply migrations from migrations.sql +- `initializeSchema()` - Runs all numbered migrations from migrations/ directory +- `runMigrations()` - Apply numbered migrations using MigrationRunner **Key Files:** -- Reads: `schema.sql`, `migrations.sql` +- Migrations: `migrations/*.sql` (all schema definitions, starting from 000) - Creates database at path from config +**Schema Management Policy (Migration-First):** + +- ALL schema definitions are in numbered migrations (000, 001, 002, etc.) 
+- Migration 000: Initial schema (executions, revoked_tokens) +- Migration 001: RBAC tables (users, roles, permissions, groups) +- Future changes: Always create a new numbered migration +- Never modify existing migrations after they've been applied + **Relationships:** - Used by: ExecutionRepository, all database operations diff --git a/docs/development/proxmox-examples.md b/docs/development/proxmox-examples.md new file mode 100644 index 00000000..2fd66aad --- /dev/null +++ b/docs/development/proxmox-examples.md @@ -0,0 +1,877 @@ +# Proxmox Integration Configuration Examples + +This document provides complete, working examples for configuring and using the Proxmox integration in Pabawi. + +## Table of Contents + +- [Basic Configuration](#basic-configuration) +- [Authentication Examples](#authentication-examples) +- [Environment Variable Configuration](#environment-variable-configuration) +- [SSL/TLS Configuration](#ssltls-configuration) +- [Lifecycle Action Examples](#lifecycle-action-examples) +- [Provisioning Examples](#provisioning-examples) +- [Advanced Use Cases](#advanced-use-cases) + +## Basic Configuration + +### Minimal Configuration with Token + +```typescript +// config/integrations.ts +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: 'proxmox.example.com', + token: 'automation@pve!api-token=12345678-1234-1234-1234-123456789abc' + } + } +}; +``` + +### Configuration with Custom Port + +```typescript +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: 'proxmox.example.com', + port: 8007, // Custom port + token: 'automation@pve!api-token=12345678-1234-1234-1234-123456789abc' + } + } +}; +``` + +## Authentication Examples + +### Token Authentication (Recommended) + +```typescript +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: 
'proxmox.example.com', + port: 8006, + token: 'automation@pve!api-token=12345678-1234-1234-1234-123456789abc' + } + } +}; +``` + +**Creating the API Token in Proxmox:** + +```bash +# Via Proxmox CLI +pveum user token add automation@pve api-token --privsep 0 + +# The output will be: +# ┌──────────────┬──────────────────────────────────────┐ +# │ key │ value │ +# ╞══════════════╪══════════════════════════════════════╡ +# │ full-tokenid │ automation@pve!api-token │ +# ├──────────────┼──────────────────────────────────────┤ +# │ info │ {"privsep":0} │ +# ├──────────────┼──────────────────────────────────────┤ +# │ value │ 12345678-1234-1234-1234-123456789abc │ +# └──────────────┴──────────────────────────────────────┘ + +# Set permissions for the token user +pveum acl modify / --users automation@pve --roles PVEVMAdmin +``` + +### Password Authentication with PAM + +```typescript +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: 'proxmox.example.com', + port: 8006, + username: 'root', + password: 'your-secure-password', + realm: 'pam' + } + } +}; +``` + +### Password Authentication with PVE Realm + +```typescript +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: 'proxmox.example.com', + port: 8006, + username: 'automation', + password: 'your-secure-password', + realm: 'pve' + } + } +}; +``` + +## Environment Variable Configuration + +### .env File + +```bash +# .env +# Proxmox Connection +PROXMOX_HOST=proxmox.example.com +PROXMOX_PORT=8006 + +# Token Authentication (recommended) +PROXMOX_TOKEN=automation@pve!api-token=12345678-1234-1234-1234-123456789abc + +# OR Password Authentication +# PROXMOX_USERNAME=root +# PROXMOX_PASSWORD=your-secure-password +# PROXMOX_REALM=pam + +# SSL Configuration +PROXMOX_SSL_VERIFY=true +PROXMOX_CA_CERT=/etc/pabawi/certs/proxmox-ca.pem + +# Optional: Client Certificate Authentication +# 
PROXMOX_CLIENT_CERT=/etc/pabawi/certs/client.pem +# PROXMOX_CLIENT_KEY=/etc/pabawi/certs/client-key.pem + +# Timeout Configuration +PROXMOX_TIMEOUT=30000 +``` + +### Configuration Using Environment Variables + +```typescript +// config/integrations.ts +import * as dotenv from 'dotenv'; +dotenv.config(); + +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: process.env.PROXMOX_HOST!, + port: parseInt(process.env.PROXMOX_PORT || '8006'), + token: process.env.PROXMOX_TOKEN, + // Fallback to password auth if token not provided + username: process.env.PROXMOX_USERNAME, + password: process.env.PROXMOX_PASSWORD, + realm: process.env.PROXMOX_REALM, + ssl: { + rejectUnauthorized: process.env.PROXMOX_SSL_VERIFY !== 'false', + ca: process.env.PROXMOX_CA_CERT, + cert: process.env.PROXMOX_CLIENT_CERT, + key: process.env.PROXMOX_CLIENT_KEY + }, + timeout: parseInt(process.env.PROXMOX_TIMEOUT || '30000') + } + } +}; +``` + +### Docker Environment Variables + +```yaml +# docker-compose.yml +version: '3.8' +services: + pabawi: + image: pabawi:latest + environment: + - PROXMOX_HOST=proxmox.example.com + - PROXMOX_PORT=8006 + - PROXMOX_TOKEN=automation@pve!api-token=12345678-1234-1234-1234-123456789abc + - PROXMOX_SSL_VERIFY=true + volumes: + - ./certs:/etc/pabawi/certs:ro +``` + +## SSL/TLS Configuration + +### Self-Signed Certificate + +```typescript +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: 'proxmox.example.com', + port: 8006, + token: 'automation@pve!api-token=12345678-1234-1234-1234-123456789abc', + ssl: { + rejectUnauthorized: true, + ca: '/etc/pabawi/certs/proxmox-ca.pem' + } + } + } +}; +``` + +**Exporting Proxmox CA Certificate:** + +```bash +# On Proxmox server +cat /etc/pve/pve-root-ca.pem > proxmox-ca.pem + +# Copy to Pabawi server +scp proxmox-ca.pem pabawi-server:/etc/pabawi/certs/ +``` + +### Client Certificate 
Authentication + +```typescript +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: 'proxmox.example.com', + port: 8006, + token: 'automation@pve!api-token=12345678-1234-1234-1234-123456789abc', + ssl: { + rejectUnauthorized: true, + ca: '/etc/pabawi/certs/proxmox-ca.pem', + cert: '/etc/pabawi/certs/client.pem', + key: '/etc/pabawi/certs/client-key.pem' + } + } + } +}; +``` + +### Disable Certificate Verification (Testing Only) + +```typescript +export const integrationsConfig = { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: 'proxmox.example.com', + port: 8006, + token: 'automation@pve!api-token=12345678-1234-1234-1234-123456789abc', + ssl: { + rejectUnauthorized: false // WARNING: Insecure, testing only! + } + } + } +}; +``` + +**⚠️ Warning**: Disabling certificate verification is insecure and should only be used in testing environments. + +## Lifecycle Action Examples + +### Start a VM + +```typescript +import { integrationManager } from './integrations'; + +async function startVM() { + const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'start', + parameters: {} + }); + + if (result.success) { + console.log('VM started successfully'); + } else { + console.error('Failed to start VM:', result.error); + } +} +``` + +### Graceful Shutdown + +```typescript +async function shutdownVM() { + const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'shutdown', + parameters: {} + }); + + if (result.success) { + console.log('VM shutdown initiated'); + } else { + console.error('Failed to shutdown VM:', result.error); + } +} +``` + +### Force Stop + +```typescript +async function stopVM() { + const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'stop', + parameters: {} + }); + + 
if (result.success) { + console.log('VM stopped'); + } else { + console.error('Failed to stop VM:', result.error); + } +} +``` + +### Reboot a Container + +```typescript +async function rebootContainer() { + const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:101', + action: 'reboot', + parameters: {} + }); + + if (result.success) { + console.log('Container rebooted'); + } else { + console.error('Failed to reboot container:', result.error); + } +} +``` + +### Suspend and Resume + +```typescript +async function suspendVM() { + const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'suspend', + parameters: {} + }); + + if (result.success) { + console.log('VM suspended'); + } +} + +async function resumeVM() { + const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'resume', + parameters: {} + }); + + if (result.success) { + console.log('VM resumed'); + } +} +``` + +## Provisioning Examples + +### Create a Basic VM + +```typescript +async function createBasicVM() { + const result = await integrationManager.executeAction({ + type: 'provision', + action: 'create_vm', + parameters: { + vmid: 100, + name: 'web-server-01', + node: 'node1', + cores: 2, + memory: 2048, + disk: 'local-lvm:32', + network: { + model: 'virtio', + bridge: 'vmbr0' + } + } + }); + + if (result.success) { + console.log('VM created:', result.metadata); + } else { + console.error('Failed to create VM:', result.error); + } +} +``` + +### Create a VM with Advanced Configuration + +```typescript +async function createAdvancedVM() { + const result = await integrationManager.executeAction({ + type: 'provision', + action: 'create_vm', + parameters: { + vmid: 101, + name: 'database-server', + node: 'node1', + cores: 4, + sockets: 2, + memory: 8192, + cpu: 'host', + ostype: 'l26', + disk: 'local-lvm:100', + scsi0: 
'local-lvm:100,cache=writeback,discard=on', + network: { + model: 'virtio', + bridge: 'vmbr0', + firewall: 1, + tag: 100 + }, + ide2: 'local:iso/ubuntu-22.04-server-amd64.iso,media=cdrom' + } + }); + + if (result.success) { + console.log('Advanced VM created:', result.metadata); + } else { + console.error('Failed to create VM:', result.error); + } +} +``` + +### Create an LXC Container + +```typescript +async function createContainer() { + const result = await integrationManager.executeAction({ + type: 'provision', + action: 'create_lxc', + parameters: { + vmid: 200, + hostname: 'app-container-01', + node: 'node1', + ostemplate: 'local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst', + cores: 2, + memory: 1024, + rootfs: 'local-lvm:8', + network: { + name: 'eth0', + bridge: 'vmbr0', + ip: 'dhcp', + firewall: 1 + }, + password: 'secure-root-password' + } + }); + + if (result.success) { + console.log('Container created:', result.metadata); + } else { + console.error('Failed to create container:', result.error); + } +} +``` + +### Create Container with Static IP + +```typescript +async function createContainerWithStaticIP() { + const result = await integrationManager.executeAction({ + type: 'provision', + action: 'create_lxc', + parameters: { + vmid: 201, + hostname: 'web-container', + node: 'node1', + ostemplate: 'local:vztmpl/debian-11-standard_11.7-1_amd64.tar.zst', + cores: 1, + memory: 512, + rootfs: 'local-lvm:8', + network: { + name: 'eth0', + bridge: 'vmbr0', + ip: '192.168.1.100/24', + gw: '192.168.1.1' + } + } + }); + + if (result.success) { + console.log('Container with static IP created'); + } +} +``` + +### Destroy a VM + +```typescript +async function destroyVM() { + const result = await integrationManager.executeAction({ + type: 'provision', + action: 'destroy_vm', + parameters: { + vmid: 100, + node: 'node1' + } + }); + + if (result.success) { + console.log('VM destroyed successfully'); + } else { + console.error('Failed to destroy VM:', 
result.error); + } +} +``` + +### Destroy a Container + +```typescript +async function destroyContainer() { + const result = await integrationManager.executeAction({ + type: 'provision', + action: 'destroy_lxc', + parameters: { + vmid: 200, + node: 'node1' + } + }); + + if (result.success) { + console.log('Container destroyed successfully'); + } else { + console.error('Failed to destroy container:', result.error); + } +} +``` + +## Advanced Use Cases + +### Batch VM Creation + +```typescript +async function createMultipleVMs() { + const vmConfigs = [ + { vmid: 100, name: 'web-01', cores: 2, memory: 2048 }, + { vmid: 101, name: 'web-02', cores: 2, memory: 2048 }, + { vmid: 102, name: 'web-03', cores: 2, memory: 2048 } + ]; + + const results = await Promise.all( + vmConfigs.map(config => + integrationManager.executeAction({ + type: 'provision', + action: 'create_vm', + parameters: { + ...config, + node: 'node1', + disk: 'local-lvm:32', + network: { model: 'virtio', bridge: 'vmbr0' } + } + }) + ) + ); + + const successful = results.filter(r => r.success).length; + console.log(`Created ${successful}/${vmConfigs.length} VMs`); +} +``` + +### Rolling Restart of VMs + +```typescript +async function rollingRestart(vmids: number[]) { + for (const vmid of vmids) { + const target = `proxmox:node1:${vmid}`; + + // Graceful shutdown + await integrationManager.executeAction({ + type: 'lifecycle', + target, + action: 'shutdown', + parameters: {} + }); + + // Wait for shutdown + await new Promise(resolve => setTimeout(resolve, 30000)); + + // Start VM + await integrationManager.executeAction({ + type: 'lifecycle', + target, + action: 'start', + parameters: {} + }); + + // Wait before next VM + await new Promise(resolve => setTimeout(resolve, 10000)); + } +} + +// Usage +rollingRestart([100, 101, 102]); +``` + +### Get VM Facts and Display + +```typescript +async function displayVMInfo(vmid: number) { + const nodeId = `proxmox:node1:${vmid}`; + const facts = await 
integrationManager.getNodeFacts(nodeId); + + console.log('VM Information:'); + console.log(' Name:', facts.name); + console.log(' Status:', facts.status); + console.log(' CPU Cores:', facts.cpu?.cores); + console.log(' Memory:', facts.memory?.total, 'MB'); + console.log(' Disk:', facts.disk?.size, 'GB'); + console.log(' IP Address:', facts.network?.ip); + + if (facts.status === 'running') { + console.log(' Uptime:', facts.uptime, 'seconds'); + console.log(' Memory Usage:', facts.memory?.used, 'MB'); + console.log(' CPU Usage:', facts.cpu?.usage, '%'); + } +} +``` + +### Monitor VM Status + +```typescript +async function monitorVMStatus(vmid: number, interval: number = 5000) { + const nodeId = `proxmox:node1:${vmid}`; + + setInterval(async () => { + try { + const facts = await integrationManager.getNodeFacts(nodeId); + console.log(`[${new Date().toISOString()}] VM ${vmid}:`, { + status: facts.status, + cpu: facts.cpu?.usage, + memory: facts.memory?.used, + uptime: facts.uptime + }); + } catch (error) { + console.error('Failed to get VM status:', error); + } + }, interval); +} +``` + +### Inventory Report + +```typescript +async function generateInventoryReport() { + const inventory = await integrationManager.getInventory(); + const proxmoxNodes = inventory.filter(n => n.id.startsWith('proxmox:')); + + const report = { + total: proxmoxNodes.length, + running: proxmoxNodes.filter(n => n.status === 'running').length, + stopped: proxmoxNodes.filter(n => n.status === 'stopped').length, + vms: proxmoxNodes.filter(n => n.metadata?.type === 'qemu').length, + containers: proxmoxNodes.filter(n => n.metadata?.type === 'lxc').length, + byNode: {} as Record + }; + + proxmoxNodes.forEach(node => { + const nodeName = node.metadata?.node as string; + report.byNode[nodeName] = (report.byNode[nodeName] || 0) + 1; + }); + + console.log('Proxmox Inventory Report:'); + console.log(' Total Guests:', report.total); + console.log(' Running:', report.running); + console.log(' Stopped:', 
report.stopped); + console.log(' VMs:', report.vms); + console.log(' Containers:', report.containers); + console.log(' By Node:', report.byNode); +} +``` + +### Health Check with Alerting + +```typescript +async function checkHealthWithAlert() { + const health = await integrationManager.healthCheckAll(); + const proxmoxHealth = health.get('proxmox'); + + if (!proxmoxHealth?.healthy) { + // Send alert (example using console, replace with your alerting system) + console.error('ALERT: Proxmox integration unhealthy!', { + message: proxmoxHealth?.message, + details: proxmoxHealth?.details, + timestamp: new Date().toISOString() + }); + + // Could send email, Slack notification, PagerDuty alert, etc. + // await sendAlert('Proxmox integration unhealthy', proxmoxHealth); + } else { + console.log('Proxmox integration healthy'); + } +} + +// Run health check every 5 minutes +setInterval(checkHealthWithAlert, 5 * 60 * 1000); +``` + +### Auto-scaling Example + +```typescript +async function autoScaleWebServers(targetCount: number) { + const inventory = await integrationManager.getInventory(); + const webServers = inventory.filter(n => + n.name?.startsWith('web-') && n.id.startsWith('proxmox:') + ); + + const currentCount = webServers.length; + + if (currentCount < targetCount) { + // Scale up + const toCreate = targetCount - currentCount; + console.log(`Scaling up: creating ${toCreate} web servers`); + + for (let i = 0; i < toCreate; i++) { + const vmid = 100 + currentCount + i; + await integrationManager.executeAction({ + type: 'provision', + action: 'create_vm', + parameters: { + vmid, + name: `web-${vmid}`, + node: 'node1', + cores: 2, + memory: 2048, + disk: 'local-lvm:32', + network: { model: 'virtio', bridge: 'vmbr0' } + } + }); + } + } else if (currentCount > targetCount) { + // Scale down + const toRemove = currentCount - targetCount; + console.log(`Scaling down: removing ${toRemove} web servers`); + + const serversToRemove = webServers.slice(-toRemove); + for (const 
server of serversToRemove) { + const vmid = server.metadata?.vmid as number; + await integrationManager.executeAction({ + type: 'provision', + action: 'destroy_vm', + parameters: { + vmid, + node: server.metadata?.node as string + } + }); + } + } else { + console.log('Already at target count'); + } +} +``` + +## Testing Configuration + +### Test Connection + +```typescript +async function testConnection() { + try { + const health = await integrationManager.healthCheckAll(); + const proxmoxHealth = health.get('proxmox'); + + if (proxmoxHealth?.healthy) { + console.log('✓ Connection successful'); + console.log(' Proxmox version:', proxmoxHealth.details?.version); + } else { + console.error('✗ Connection failed:', proxmoxHealth?.message); + } + } catch (error) { + console.error('✗ Connection error:', error); + } +} +``` + +### Validate Permissions + +```typescript +async function validatePermissions() { + const tests = [ + { name: 'List VMs', fn: () => integrationManager.getInventory() }, + { name: 'Get VM Facts', fn: () => integrationManager.getNodeFacts('proxmox:node1:100') }, + { name: 'List Capabilities', fn: () => integrationManager.listCapabilities() } + ]; + + for (const test of tests) { + try { + await test.fn(); + console.log(`✓ ${test.name}`); + } catch (error) { + console.error(`✗ ${test.name}:`, error); + } + } +} +``` + +## Troubleshooting Examples + +### Debug Logging + +```typescript +// Enable debug logging +import { LoggerService } from './services/logger'; + +const logger = new LoggerService({ + level: 'debug', + component: 'proxmox-integration' +}); + +// All Proxmox API calls will now be logged +``` + +### Connection Test Script + +```bash +#!/bin/bash +# test-proxmox-connection.sh + +echo "Testing Proxmox connection..." + +# Test basic connectivity +curl -k "https://${PROXMOX_HOST}:${PROXMOX_PORT}/api2/json/version" \ + -H "Authorization: PVEAPIToken=${PROXMOX_TOKEN}" \ + | jq . 
+ +# Test authentication +curl -k "https://${PROXMOX_HOST}:${PROXMOX_PORT}/api2/json/cluster/resources?type=vm" \ + -H "Authorization: PVEAPIToken=${PROXMOX_TOKEN}" \ + | jq '.data | length' + +echo "Connection test complete" +``` + +## Additional Resources + +- [Proxmox Integration Documentation](../integrations/proxmox.md) +- [Proxmox VE API Documentation](https://pve.proxmox.com/pve-docs/api-viewer/) +- [Proxmox VE Administration Guide](https://pve.proxmox.com/pve-docs/pve-admin-guide.html) +- [Pabawi Documentation](https://pabawi.dev/docs) diff --git a/docs/integrations/aws.md b/docs/integrations/aws.md new file mode 100644 index 00000000..c914292a --- /dev/null +++ b/docs/integrations/aws.md @@ -0,0 +1,556 @@ +# AWS Integration + +The AWS integration enables Pabawi to manage Amazon EC2 infrastructure, including instance provisioning, lifecycle management, and inventory discovery across AWS regions. + +## Features + +- **Inventory Discovery**: Automatically discover all EC2 instances across regions +- **Group Management**: Organize instances by region, VPC, and tags +- **Facts Retrieval**: Get detailed instance metadata and configuration +- **Lifecycle Actions**: Start, stop, reboot, and terminate EC2 instances +- **Provisioning**: Launch new EC2 instances with full parameter control +- **Health Monitoring**: Validate AWS credentials via STS GetCallerIdentity + +## Configuration + +### Basic Configuration + +Add the AWS integration to your Pabawi configuration: + +```typescript +{ + integrations: { + aws: { + enabled: true, + name: 'aws', + type: 'both', + priority: 10, + config: { + accessKeyId: 'AKIAIOSFODNN7EXAMPLE', + secretAccessKey: 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY', + region: 'us-east-1' + } + } + } +} +``` + +### Configuration Options + +| Option | Type | Required | Default | Description | +|--------|------|----------|---------|-------------| +| `accessKeyId` | string | No* | - | AWS access key ID | +| `secretAccessKey` | string | No* | - | AWS 
secret access key | +| `region` | string | No | us-east-1 | Default AWS region | +| `regions` | string[] | No | - | List of regions to query for inventory (overrides `region` for discovery) | +| `sessionToken` | string | No | - | Session token for temporary credentials (STS) | +| `profile` | string | No | - | AWS CLI profile name (resolved by the SDK from `~/.aws/credentials`) | +| `endpoint` | string | No | - | Custom endpoint URL (for testing or VPC endpoints) | + +*If no explicit credentials or profile are provided, the AWS SDK default credential chain is used (environment variables, `~/.aws/credentials`, instance profile, etc.). + +### Environment Variables + +You can use environment variables for sensitive configuration: + +```bash +# Required +AWS_ENABLED=true + +# Credentials (optional — if omitted, the AWS SDK default credential chain is used) +# AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE +# AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +AWS_DEFAULT_REGION=us-east-1 + +# Query multiple regions for inventory (JSON array or comma-separated) +# AWS_REGIONS=["us-east-1","eu-west-1","ap-southeast-1"] + +# Optional: AWS CLI profile name +# AWS_PROFILE=default + +# Optional: Session token for temporary credentials +# AWS_SESSION_TOKEN=your_session_token_here +``` + +### Database Configuration (Recommended) + +You can also configure AWS credentials through the Pabawi UI at **Integrations > Config**. This stores credentials encrypted in the database using AES-256-GCM and allows per-user configuration that overrides environment variables. + +Navigate to `/integrations/config`, select **AWS**, and enter your credentials. Alternatively, use the **AWS Setup Guide** on the setup page for a guided walkthrough. + +## Authentication + +### IAM User with Access Keys (Recommended for Development) + +Create a dedicated IAM user with programmatic access. + +#### Creating an IAM User + +1. Open the AWS IAM Console +2. Navigate to **Users → Add users** +3. 
Enter a username (e.g., `pabawi-ec2`) +4. Select **Access key - Programmatic access** +5. Attach the required policy (see below) +6. Copy the Access Key ID and Secret Access Key + +#### Required IAM Permissions + +Grant the following permissions to the IAM user: + +- `ec2:RunInstances` — Launch new instances +- `ec2:DescribeInstances` — List and inspect instances +- `ec2:StartInstances` — Start stopped instances +- `ec2:StopInstances` — Stop running instances +- `ec2:RebootInstances` — Reboot instances +- `ec2:TerminateInstances` — Terminate instances +- `ec2:DescribeRegions` — List available regions +- `ec2:DescribeInstanceTypes` — List instance types +- `ec2:DescribeImages` — List AMIs +- `ec2:DescribeVpcs` — List VPCs +- `ec2:DescribeSubnets` — List subnets +- `ec2:DescribeSecurityGroups` — List security groups +- `ec2:DescribeKeyPairs` — List key pairs +- `sts:GetCallerIdentity` — Health check validation + +#### Minimal IAM Policy + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:RunInstances", + "ec2:DescribeInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:DescribeRegions", + "ec2:DescribeInstanceTypes", + "ec2:DescribeImages", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeKeyPairs" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "sts:GetCallerIdentity", + "Resource": "*" + } + ] +} +``` + +### Temporary Credentials (STS) + +For enhanced security, use temporary credentials from AWS STS: + +```bash +AWS_ACCESS_KEY_ID=ASIATEMP... +AWS_SECRET_ACCESS_KEY=tempSecret... +AWS_SESSION_TOKEN=FwoGZXIvYXdzE... +``` + +**Note**: Temporary credentials expire. The integration does not automatically refresh them. 
+ +### AWS CLI Profile + +You can use a named profile from your `~/.aws/credentials` and `~/.aws/config` files: + +```bash +AWS_ENABLED=true +AWS_PROFILE=my-profile +``` + +When `AWS_PROFILE` is set, the AWS SDK reads credentials and region from the corresponding profile in your AWS config files. This is useful when you manage multiple AWS accounts or use SSO. + +**Note**: `AWS_PROFILE` is passed to the AWS SDK via the process environment (loaded by dotenv). Pabawi does not read the profile files directly — the SDK handles credential resolution. + +### Default Credential Chain + +If no explicit credentials (`AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY`) or profile (`AWS_PROFILE`) are configured, the AWS SDK automatically resolves credentials using its default credential provider chain, in this order: + +1. Environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`) +2. Shared credentials file (`~/.aws/credentials`) +3. AWS config file (`~/.aws/config`) +4. ECS container credentials (if running in ECS) +5. EC2 instance metadata / IAM role (if running on EC2) + +This means `AWS_ENABLED=true` with no other AWS settings will work if your environment already has credentials configured through any of these mechanisms. + +## Inventory Discovery + +The AWS integration discovers all EC2 instances across the configured regions. If `AWS_REGIONS` is set, all listed regions are queried in parallel. Otherwise, only the default region (`AWS_DEFAULT_REGION` or `us-east-1`) is queried. 
+ +### Node Format + +Each discovered instance is represented as a Node: + +```typescript +{ + id: 'aws:us-east-1:i-0123456789abcdef0', + name: 'my-instance', + status: 'running' | 'stopped' | 'pending' | 'terminated', + ip: '10.0.1.100', + metadata: { + instanceType: 't3.micro', + region: 'us-east-1', + vpcId: 'vpc-abc123', + tags: { Name: 'my-instance', Environment: 'production' }, + source: 'aws' + } +} +``` + +### Groups + +Instances are automatically organized into groups: + +- **By Region**: `aws:region:us-east-1` — All instances in a region +- **By VPC**: `aws:vpc:vpc-abc123` — All instances in a VPC +- **By Tag**: `aws:tag:Environment:production` — Instances matching a tag + +## Lifecycle Actions + +### Supported Actions + +| Action | Description | +|--------|-------------| +| `start` | Start a stopped instance | +| `stop` | Stop a running instance | +| `reboot` | Reboot a running instance | +| `terminate` | Permanently terminate an instance | + +### Action Examples + +#### Start an Instance + +```typescript +const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'aws:us-east-1:i-0123456789abcdef0', + action: 'start', + parameters: {} +}); +``` + +#### Terminate an Instance + +```typescript +const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'aws:us-east-1:i-0123456789abcdef0', + action: 'terminate', + parameters: {} +}); +``` + +### Action Results + +All actions return an `ExecutionResult`: + +```typescript +{ + success: true, + output: 'Instance started successfully', + metadata: { + instanceId: 'i-0123456789abcdef0', + region: 'us-east-1' + } +} +``` + +## Provisioning + +### Launch an EC2 Instance + +```typescript +const result = await integrationManager.executeAction({ + type: 'provision', + action: 'provision', + parameters: { + imageId: 'ami-0abcdef1234567890', + instanceType: 't3.micro', + keyName: 'my-key-pair', + securityGroupIds: ['sg-0123456789abcdef0'], + subnetId: 
'subnet-0123456789abcdef0',
+    region: 'us-east-1',
+    name: 'my-new-instance'
+  }
+});
+```
+
+#### Provisioning Parameters
+
+| Parameter | Type | Required | Default | Description |
+|-----------|------|----------|---------|-------------|
+| `imageId` | string | Yes | - | AMI ID to launch |
+| `instanceType` | string | No | t3.micro | EC2 instance type |
+| `keyName` | string | No | - | SSH key pair name |
+| `securityGroupIds` | string[] | No | - | Security group IDs |
+| `subnetId` | string | No | - | Subnet ID for VPC placement |
+| `region` | string | No | default | AWS region to launch in |
+| `name` | string | No | - | Instance name tag |
+
+## Resource Discovery
+
+The integration provides endpoints to discover AWS resources for provisioning:
+
+| Endpoint | Description |
+|----------|-------------|
+| `GET /api/integrations/aws/regions` | List available AWS regions |
+| `GET /api/integrations/aws/instance-types` | List EC2 instance types |
+| `GET /api/integrations/aws/amis?region={region}` | List AMIs by region |
+| `GET /api/integrations/aws/vpcs?region={region}` | List VPCs by region |
+| `GET /api/integrations/aws/subnets?region={region}` | List subnets by region |
+| `GET /api/integrations/aws/security-groups?region={region}` | List security groups by region |
+| `GET /api/integrations/aws/key-pairs?region={region}` | List key pairs by region |
+
+## Health Monitoring
+
+Check the health of the AWS integration:
+
+```typescript
+const health = await integrationManager.healthCheckAll();
+const awsHealth = health.get('aws');
+
+console.log(awsHealth);
+// {
+//   healthy: true,
+//   message: 'AWS credentials valid',
+//   details: { account: '123456789012', arn: 'arn:aws:iam::...' },
+//   lastCheck: 1234567890
+// }
+```
+
+### Health States
+
+- **Healthy**: STS GetCallerIdentity succeeds, credentials are valid
+- **Unhealthy**: Authentication failed or API unreachable
+
+The plugin continues accepting configuration updates when unhealthy. 
+ +## Error Handling + +### Error Types + +- `AWSAuthenticationError` — Invalid or expired credentials (401/403) +- General API errors — Network issues, rate limiting, service errors + +### Common Errors + +#### InvalidClientTokenId + +``` +AWSAuthenticationError: AWS authentication failed +``` + +**Solution**: Verify your Access Key ID is correct and the IAM user is active. + +#### SignatureDoesNotMatch + +``` +AWSAuthenticationError: AWS authentication failed +``` + +**Solution**: Verify your Secret Access Key is correct. + +#### UnauthorizedOperation + +``` +AccessDenied: User is not authorized to perform ec2:RunInstances +``` + +**Solution**: Attach the required IAM policy to your user or role. + +#### ExpiredToken + +``` +AWSAuthenticationError: AWS authentication failed +``` + +**Solution**: Refresh your temporary credentials (session token has expired). + +## Troubleshooting + +### Connection Issues + +**Problem**: Cannot reach AWS API endpoints + +**Solutions**: + +1. Check network connectivity to AWS endpoints +2. Verify proxy settings if behind a corporate firewall +3. Ensure DNS resolution works for `ec2.{region}.amazonaws.com` +4. Test with AWS CLI: `aws sts get-caller-identity` + +### Authentication Issues + +**Problem**: Credentials are rejected + +**Solutions**: + +1. Verify Access Key ID and Secret Access Key are correct +2. Check the IAM user is not disabled or deleted +3. For temporary credentials, ensure the session token is still valid +4. Verify no restrictive IAM policies or SCPs are blocking access + +### Permission Issues + +**Problem**: Operations fail with access denied + +**Solutions**: + +1. Review the IAM policy attached to the user +2. Check for restrictive Service Control Policies (SCPs) +3. Verify the region is enabled in your AWS account +4. Review CloudTrail logs for detailed permission errors + +### Region Issues + +**Problem**: Resources not found or empty inventory + +**Solutions**: + +1. 
Verify the correct region is configured +2. Check that EC2 instances exist in the specified region +3. Ensure the region is enabled in your AWS account settings + +## Best Practices + +### Security + +1. **Use Least Privilege**: Grant only the specific EC2 actions Pabawi needs +2. **Rotate Credentials**: Regularly rotate access keys +3. **Use Temporary Credentials**: Prefer STS temporary credentials over long-lived keys +4. **Store Securely**: Use the Pabawi Integration Config UI (encrypted storage) rather than plain .env files +5. **Monitor Access**: Enable CloudTrail logging for API activity + +### Performance + +1. **Region Selection**: Configure the region closest to your Pabawi instance +2. **Resource Caching**: The integration caches inventory and resource discovery results + +### Reliability + +1. **Handle Errors**: Always check `ExecutionResult.success` before proceeding +2. **Health Checks**: Monitor integration health to detect credential expiration early +3. **Journal Events**: All provisioning and lifecycle actions are recorded in the journal + +## API Reference + +### Integration Methods + +#### getInventory() + +Returns all EC2 instances across the configured regions (queries all regions in parallel). + +```typescript +const nodes = await awsPlugin.getInventory(); +``` + +#### getGroups() + +Returns groups organized by region, VPC, and tags. + +```typescript +const groups = await awsPlugin.getGroups(); +``` + +#### getNodeFacts(nodeId: string) + +Returns detailed facts for a specific EC2 instance. + +```typescript +const facts = await awsPlugin.getNodeFacts('aws:us-east-1:i-0123456789abcdef0'); +``` + +#### executeAction(action: Action) + +Executes a lifecycle or provisioning action. + +```typescript +const result = await awsPlugin.executeAction({ + type: 'lifecycle', + target: 'aws:us-east-1:i-0123456789abcdef0', + action: 'start', + parameters: {} +}); +``` + +#### performHealthCheck() + +Validates AWS credentials using STS GetCallerIdentity. 
+ +```typescript +const health = await awsPlugin.performHealthCheck(); +``` + +#### getRegions() + +Returns available AWS regions. + +```typescript +const regions = await awsPlugin.getRegions(); +``` + +#### getInstanceTypes(region?: string) + +Returns available EC2 instance types. + +```typescript +const types = await awsPlugin.getInstanceTypes('us-east-1'); +``` + +#### getAMIs(region: string) + +Returns available AMIs for a region. + +```typescript +const amis = await awsPlugin.getAMIs('us-east-1'); +``` + +#### getVPCs(region: string) + +Returns VPCs for a region. + +```typescript +const vpcs = await awsPlugin.getVPCs('us-east-1'); +``` + +#### getSubnets(region: string, vpcId?: string) + +Returns subnets for a region, optionally filtered by VPC. + +```typescript +const subnets = await awsPlugin.getSubnets('us-east-1', 'vpc-abc123'); +``` + +#### getSecurityGroups(region: string, vpcId?: string) + +Returns security groups for a region, optionally filtered by VPC. + +```typescript +const sgs = await awsPlugin.getSecurityGroups('us-east-1'); +``` + +#### getKeyPairs(region: string) + +Returns key pairs for a region. + +```typescript +const keyPairs = await awsPlugin.getKeyPairs('us-east-1'); +``` + +## Support + +For issues, questions, or contributions: + +- GitHub Issues: [pabawi/issues](https://github.com/pabawi/pabawi/issues) +- Documentation: [pabawi.dev/docs](https://pabawi.dev/docs) +- AWS EC2 Docs: [docs.aws.amazon.com/ec2](https://docs.aws.amazon.com/ec2/) diff --git a/docs/integrations/proxmox.md b/docs/integrations/proxmox.md new file mode 100644 index 00000000..a768c83c --- /dev/null +++ b/docs/integrations/proxmox.md @@ -0,0 +1,647 @@ +# Proxmox Integration + +The Proxmox integration enables Pabawi to manage Proxmox Virtual Environment (VE) infrastructure, including virtual machines (VMs) and Linux containers (LXC). This integration provides inventory discovery, lifecycle management, and provisioning capabilities for your Proxmox cluster. 
+ +## Features + +- **Inventory Discovery**: Automatically discover all VMs and containers across your Proxmox cluster +- **Group Management**: Organize resources by node, status, and type +- **Facts Retrieval**: Get detailed configuration and status information for any guest +- **Lifecycle Actions**: Start, stop, shutdown, reboot, suspend, and resume VMs and containers +- **Provisioning**: Create and destroy VMs and LXC containers programmatically +- **Health Monitoring**: Monitor the health and connectivity of your Proxmox cluster + +## Configuration + +### Basic Configuration + +Add the Proxmox integration to your Pabawi configuration: + +```typescript +{ + integrations: { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + priority: 10, + config: { + host: 'proxmox.example.com', + port: 8006, + token: 'user@realm!tokenid=uuid' + } + } + } +} +``` + +### Configuration Options + +| Option | Type | Required | Default | Description | +|--------|------|----------|---------|-------------| +| `host` | string | Yes | - | Proxmox server hostname or IP address | +| `port` | number | No | 8006 | Proxmox API port | +| `token` | string | No* | - | API token for authentication (recommended) | +| `username` | string | No* | - | Username for password authentication | +| `password` | string | No* | - | Password for password authentication | +| `realm` | string | No | - | Authentication realm (required for password auth) | +| `ssl.rejectUnauthorized` | boolean | No | true | Verify TLS certificates | +| `ssl.ca` | string | No | - | Path to custom CA certificate | +| `ssl.cert` | string | No | - | Path to client certificate | +| `ssl.key` | string | No | - | Path to client certificate key | +| `timeout` | number | No | 30000 | Request timeout in milliseconds | + +*Either `token` or `username`/`password` must be provided. 
+ +### Environment Variables + +You can use environment variables for sensitive configuration: + +```bash +# Required +PROXMOX_HOST=proxmox.example.com +PROXMOX_PORT=8006 + +# Token authentication (recommended) +PROXMOX_TOKEN=user@realm!tokenid=uuid + +# OR password authentication +PROXMOX_USERNAME=root +PROXMOX_PASSWORD=secret +PROXMOX_REALM=pam + +# Optional SSL configuration +PROXMOX_SSL_VERIFY=true +PROXMOX_CA_CERT=/path/to/ca.pem +PROXMOX_CLIENT_CERT=/path/to/client.pem +PROXMOX_CLIENT_KEY=/path/to/client-key.pem +``` + +Then reference them in your configuration: + +```typescript +{ + integrations: { + proxmox: { + enabled: true, + name: 'proxmox', + type: 'both', + config: { + host: process.env.PROXMOX_HOST, + port: parseInt(process.env.PROXMOX_PORT || '8006'), + token: process.env.PROXMOX_TOKEN, + ssl: { + rejectUnauthorized: process.env.PROXMOX_SSL_VERIFY !== 'false', + ca: process.env.PROXMOX_CA_CERT, + cert: process.env.PROXMOX_CLIENT_CERT, + key: process.env.PROXMOX_CLIENT_KEY + } + } + } + } +} +``` + +## Authentication + +### Token Authentication (Recommended) + +Token authentication is more secure and provides fine-grained permission control. + +#### Creating an API Token + +1. Log in to your Proxmox web interface +2. Navigate to **Datacenter → Permissions → API Tokens** +3. Click **Add** to create a new token +4. Select the user and enter a token ID +5. Optionally disable **Privilege Separation** for full user permissions +6. Click **Add** and copy the generated token +7. 
The token format is: `user@realm!tokenid=uuid` + +#### Required Permissions + +Grant the following permissions to the token user: + +- `VM.Allocate` - Create VMs and containers +- `VM.Config.*` - Configure VMs and containers +- `VM.PowerMgmt` - Start, stop, and manage power state +- `VM.Audit` - Read VM information +- `Datastore.Allocate` - Allocate disk space + +#### Configuration Example + +```typescript +config: { + host: 'proxmox.example.com', + port: 8006, + token: 'automation@pve!api-token=12345678-1234-1234-1234-123456789abc' +} +``` + +### Password Authentication + +Password authentication uses username and password to obtain a temporary authentication ticket. + +#### Configuration Example + +```typescript +config: { + host: 'proxmox.example.com', + port: 8006, + username: 'root', + password: 'your-secure-password', + realm: 'pam' +} +``` + +#### Available Realms + +- `pam` - Linux PAM authentication +- `pve` - Proxmox VE authentication + +**Note**: Authentication tickets expire after 2 hours by default. The integration automatically refreshes tickets when they expire. + +## Inventory Discovery + +The Proxmox integration automatically discovers all VMs and containers in your cluster. + +### Node Format + +Each discovered guest is represented as a Node with the following format: + +```typescript +{ + id: 'proxmox:node-name:vmid', + name: 'vm-name', + status: 'running' | 'stopped' | 'paused', + ip: '192.168.1.100', // Optional + metadata: { + node: 'node-name', + type: 'qemu' | 'lxc', + vmid: 100, + source: 'proxmox' + } +} +``` + +### Groups + +Resources are automatically organized into groups: + +- **By Node**: `proxmox:node:node-name` - All guests on a specific Proxmox node +- **By Status**: `proxmox:status:running` - All guests with a specific status +- **By Type**: `proxmox:type:qemu` or `proxmox:type:lxc` - All VMs or all containers + +### Caching + +Inventory data is cached for 60 seconds to reduce API load. Groups are also cached for 60 seconds. 
+ +## Facts Retrieval + +Get detailed information about a specific VM or container: + +```typescript +const facts = await integrationManager.getNodeFacts('proxmox:node1:100'); +``` + +Facts include: + +- CPU configuration (cores, sockets, CPU type) +- Memory configuration (total, current usage) +- Disk configuration (size, usage) +- Network configuration (interfaces, IP addresses) +- Current status and uptime +- Resource usage statistics (when running) + +Facts are cached for 30 seconds. + +## Lifecycle Actions + +### Supported Actions + +| Action | Description | Applies To | +|--------|-------------|------------| +| `start` | Start a VM or container | VMs, LXC | +| `stop` | Force stop a VM or container | VMs, LXC | +| `shutdown` | Gracefully shutdown a VM or container | VMs, LXC | +| `reboot` | Reboot a VM or container | VMs, LXC | +| `suspend` | Suspend a VM (save state to disk) | VMs only | +| `resume` | Resume a suspended VM | VMs only | + +### Action Examples + +#### Start a VM + +```typescript +const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'start', + parameters: {} +}); +``` + +#### Graceful Shutdown + +```typescript +const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'shutdown', + parameters: {} +}); +``` + +#### Suspend a VM + +```typescript +const result = await integrationManager.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'suspend', + parameters: {} +}); +``` + +### Action Results + +All actions return an `ExecutionResult`: + +```typescript +{ + success: true, + output: 'VM started successfully', + metadata: { + vmid: 100, + node: 'node1' + } +} +``` + +## Provisioning + +### Create a Virtual Machine + +```typescript +const result = await integrationManager.executeAction({ + type: 'provision', + action: 'create_vm', + parameters: { + vmid: 100, + name: 'my-vm', + node: 'node1', + 
cores: 2, + memory: 2048, + disk: 'local-lvm:32', + network: { + model: 'virtio', + bridge: 'vmbr0' + } + } +}); +``` + +#### VM Creation Parameters + +| Parameter | Type | Required | Default | Description | +|-----------|------|----------|---------|-------------| +| `vmid` | number | Yes | - | Unique VM ID (100-999999999) | +| `name` | string | Yes | - | VM name | +| `node` | string | Yes | - | Target Proxmox node | +| `cores` | number | No | 1 | Number of CPU cores | +| `memory` | number | No | 512 | Memory in MB | +| `sockets` | number | No | 1 | Number of CPU sockets | +| `cpu` | string | No | - | CPU type (e.g., 'host') | +| `disk` | string | No | - | Disk configuration (e.g., 'local-lvm:32') | +| `network` | object | No | - | Network configuration | +| `ostype` | string | No | - | OS type (e.g., 'l26' for Linux 2.6+) | + +### Create an LXC Container + +```typescript +const result = await integrationManager.executeAction({ + type: 'provision', + action: 'create_lxc', + parameters: { + vmid: 101, + hostname: 'my-container', + node: 'node1', + ostemplate: 'local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst', + cores: 1, + memory: 512, + rootfs: 'local-lvm:8', + network: { + name: 'eth0', + bridge: 'vmbr0', + ip: 'dhcp' + } + } +}); +``` + +#### LXC Creation Parameters + +| Parameter | Type | Required | Default | Description | +|-----------|------|----------|---------|-------------| +| `vmid` | number | Yes | - | Unique container ID (100-999999999) | +| `hostname` | string | Yes | - | Container hostname | +| `node` | string | Yes | - | Target Proxmox node | +| `ostemplate` | string | Yes | - | OS template path | +| `cores` | number | No | 1 | Number of CPU cores | +| `memory` | number | No | 512 | Memory in MB | +| `rootfs` | string | No | - | Root filesystem (e.g., 'local-lvm:8') | +| `network` | object | No | - | Network configuration | +| `password` | string | No | - | Root password | + +### Destroy a Guest + +```typescript +const result = await 
integrationManager.executeAction({ + type: 'provision', + action: 'destroy_vm', // or 'destroy_lxc' + parameters: { + vmid: 100, + node: 'node1' + } +}); +``` + +**Note**: If the guest is running, it will be automatically stopped before deletion. + +## Health Monitoring + +Check the health of the Proxmox integration: + +```typescript +const health = await integrationManager.healthCheckAll(); +const proxmoxHealth = health.get('proxmox'); + +console.log(proxmoxHealth); +// { +// healthy: true, +// message: 'Proxmox API is reachable', +// details: { version: '7.4-3' }, +// lastCheck: 1234567890 +// } +``` + +### Health States + +- **Healthy**: API is reachable and responding +- **Degraded**: Authentication issues detected +- **Unhealthy**: API is unreachable or returning errors + +Health check results are cached for 30 seconds. + +## Error Handling + +### Error Types + +The integration provides specific error types for different failure scenarios: + +- `ProxmoxAuthenticationError` - Authentication failures (401, 403) +- `ProxmoxConnectionError` - Network connectivity issues +- `ProxmoxError` - General API errors + +### Common Errors + +#### Authentication Failed + +``` +ProxmoxAuthenticationError: Failed to authenticate with Proxmox API +``` + +**Solution**: Verify your credentials or token are correct and have not expired. + +#### Guest Not Found + +``` +ProxmoxError: Guest 100 not found on node node1 +``` + +**Solution**: Verify the VMID and node name are correct. + +#### VMID Already Exists + +``` +VM with VMID 100 already exists on node node1 +``` + +**Solution**: Choose a different VMID or destroy the existing guest first. + +#### Connection Timeout + +``` +ProxmoxConnectionError: Request timeout after 30000ms +``` + +**Solution**: Check network connectivity to the Proxmox server or increase the timeout value. 
+ +### Retry Logic + +The integration automatically retries transient failures: + +- Network timeouts (ETIMEDOUT) +- Connection resets (ECONNRESET) +- DNS resolution failures (ENOTFOUND) +- Rate limiting (429) +- Server errors (5xx) + +Retry configuration: + +- Maximum attempts: 3 +- Initial delay: 1 second +- Exponential backoff with 2x multiplier +- Maximum delay: 10 seconds + +## Troubleshooting + +### Connection Issues + +**Problem**: Cannot connect to Proxmox API + +**Solutions**: + +1. Verify the host and port are correct +2. Check firewall rules allow access to port 8006 +3. Ensure Proxmox API is enabled and running +4. Test connectivity: `curl -k https://proxmox.example.com:8006/api2/json/version` + +### Authentication Issues + +**Problem**: Authentication fails with valid credentials + +**Solutions**: + +1. For token auth: Verify the token format is `user@realm!tokenid=uuid` +2. For password auth: Verify the realm is correct (`pam` or `pve`) +3. Check user permissions in Proxmox +4. Verify the user account is not locked or expired + +### SSL Certificate Issues + +**Problem**: SSL certificate verification fails + +**Solutions**: + +1. For self-signed certificates, provide the CA certificate path: + + ```typescript + ssl: { + ca: '/path/to/ca.pem' + } + ``` + +2. For testing only, disable certificate verification: + + ```typescript + ssl: { + rejectUnauthorized: false + } + ``` + + **Warning**: This is insecure and should not be used in production. + +### Permission Issues + +**Problem**: Operations fail with permission denied errors + +**Solutions**: + +1. Verify the user has the required permissions: + - `VM.Allocate` for creating VMs + - `VM.PowerMgmt` for lifecycle actions + - `VM.Config.*` for configuration changes +2. Check permissions at both user and token level +3. Ensure permissions are set on the correct path (/, /vms/, etc.) + +### Performance Issues + +**Problem**: Slow response times or timeouts + +**Solutions**: + +1. 
Check Proxmox server load and performance +2. Increase cache TTL to reduce API calls +3. Increase timeout value in configuration +4. Use token authentication instead of password authentication +5. Monitor network latency between Pabawi and Proxmox + +### Task Timeout + +**Problem**: Long-running operations timeout + +**Solutions**: + +1. Increase the timeout value in configuration +2. Check Proxmox task logs for the specific operation +3. Verify sufficient resources are available on the target node +4. For VM creation, ensure the storage is not slow or full + +## Best Practices + +### Security + +1. **Use Token Authentication**: More secure than password authentication +2. **Enable Certificate Verification**: Always verify TLS certificates in production +3. **Least Privilege**: Grant only required permissions to API tokens +4. **Rotate Credentials**: Regularly rotate API tokens and passwords +5. **Secure Storage**: Store credentials in environment variables or secure vaults + +### Performance + +1. **Use Caching**: Default cache TTLs are optimized for most use cases +2. **Batch Operations**: When possible, perform multiple operations in parallel +3. **Monitor Health**: Regularly check integration health to detect issues early +4. **Connection Pooling**: The integration reuses connections automatically + +### Reliability + +1. **Handle Errors**: Always check `ExecutionResult.success` before proceeding +2. **Retry Logic**: The integration handles transient failures automatically +3. **Health Checks**: Monitor health status and alert on failures +4. **Logging**: Enable debug logging for troubleshooting + +### Operations + +1. **Test First**: Test provisioning operations in a development environment +2. **Unique VMIDs**: Use a VMID allocation strategy to avoid conflicts +3. **Resource Limits**: Monitor Proxmox cluster resources before provisioning +4. 
**Backup**: Always backup important VMs before destructive operations + +## API Reference + +### Integration Methods + +#### getInventory() + +Returns all VMs and containers in the Proxmox cluster. + +```typescript +const nodes = await proxmoxIntegration.getInventory(); +``` + +#### getGroups() + +Returns groups organized by node, status, and type. + +```typescript +const groups = await proxmoxIntegration.getGroups(); +``` + +#### getNodeFacts(nodeId: string) + +Returns detailed facts for a specific guest. + +```typescript +const facts = await proxmoxIntegration.getNodeFacts('proxmox:node1:100'); +``` + +#### executeAction(action: Action) + +Executes a lifecycle or provisioning action. + +```typescript +const result = await proxmoxIntegration.executeAction({ + type: 'lifecycle', + target: 'proxmox:node1:100', + action: 'start', + parameters: {} +}); +``` + +#### listCapabilities() + +Returns available lifecycle actions. + +```typescript +const capabilities = proxmoxIntegration.listCapabilities(); +``` + +#### listProvisioningCapabilities() + +Returns available provisioning operations. + +```typescript +const capabilities = proxmoxIntegration.listProvisioningCapabilities(); +``` + +#### performHealthCheck() + +Checks the health of the Proxmox connection. + +```typescript +const health = await proxmoxIntegration.performHealthCheck(); +``` + +## Examples + +See the [Configuration Examples](../examples/proxmox-examples.md) document for complete working examples. 
+ +## Support + +For issues, questions, or contributions: + +- GitHub Issues: [pabawi/issues](https://github.com/pabawi/pabawi/issues) +- Documentation: [pabawi.dev/docs](https://pabawi.dev/docs) +- Proxmox API Docs: [pve.proxmox.com/pve-docs/api-viewer](https://pve.proxmox.com/pve-docs/api-viewer/) diff --git a/docs/manage-tab-guide.md b/docs/manage-tab-guide.md new file mode 100644 index 00000000..90dd4860 --- /dev/null +++ b/docs/manage-tab-guide.md @@ -0,0 +1,851 @@ +# Manage Tab Usage Guide + +## Overview + +The Manage tab on the Node Detail page provides lifecycle management controls for virtual machines and containers. This guide explains how to use the Manage tab to start, stop, reboot, and destroy resources through the Pabawi interface. + +## Table of Contents + +- [Accessing the Manage Tab](#accessing-the-manage-tab) +- [Understanding the Interface](#understanding-the-interface) +- [Lifecycle Actions](#lifecycle-actions) +- [Action Availability](#action-availability) +- [Destructive Actions](#destructive-actions) +- [Monitoring Operations](#monitoring-operations) +- [Best Practices](#best-practices) +- [Troubleshooting](#troubleshooting) + +## Accessing the Manage Tab + +### Prerequisites + +Before you can use the Manage tab: + +- **Management Permissions**: Your user account must have lifecycle management permissions +- **Configured Integration**: The resource must be managed by a configured integration (e.g., Proxmox) +- **Resource Access**: You must have access to the specific resource + +### Navigation + +1. **From Inventory**: + - Navigate to the Inventory page + - Click on a VM or container + - The Node Detail page opens + +2. **Locate the Manage Tab**: + - Look for the tab navigation on the Node Detail page + - Tabs may include: Overview, Facts, Manage, Reports + - Click on the **"Manage"** tab + +3. 
**Permission-Based Access**: + - The Manage tab only appears if you have lifecycle permissions + - If you don't see the tab, you may lack management permissions + - Contact your administrator for access + +## Understanding the Interface + +### Page Layout + +The Manage tab consists of several sections: + +1. **Resource Status**: + - Current state of the resource (running, stopped, suspended) + - Status indicator (green, red, yellow) + - Last updated timestamp + +2. **Available Actions**: + - Action buttons for lifecycle operations + - Buttons are enabled/disabled based on resource state + - Only actions you have permission for are shown + +3. **Action History** (optional): + - Recent actions performed on this resource + - Action status and timestamps + - Links to execution details + +4. **Resource Information**: + - Resource type (VM or LXC) + - Integration managing the resource + - Node location + +### Status Indicators + +**Resource States**: + +- **Running** (Green): Resource is active and operational +- **Stopped** (Red): Resource is powered off +- **Suspended** (Yellow): VM is suspended (saved to disk) +- **Paused** (Yellow): VM is paused (saved to memory) +- **Unknown** (Gray): Status cannot be determined + +**Action Status**: + +- **Available**: Button is enabled and clickable +- **Unavailable**: Button is disabled (grayed out) +- **In Progress**: Loading indicator, all buttons disabled +- **Hidden**: Action not available for this resource type or state + +## Lifecycle Actions + +### Start Action + +**Purpose**: Power on a stopped VM or container + +**When Available**: + +- Resource state is "stopped" +- You have `lifecycle:start` permission + +**How to Use**: + +1. Verify resource is stopped (red status indicator) +2. Click the **"Start"** button +3. Wait for operation to complete (typically 5-30 seconds) +4. Success notification appears +5. 
Resource status updates to "running" + +**What Happens**: + +- VM/container boots up +- Operating system starts +- Network interfaces activate +- Services start automatically + +**Use Cases**: + +- Starting a VM after maintenance +- Bringing a container online +- Recovering from a shutdown + +**Example**: + +``` +Resource: web-server-01 +Current State: Stopped +Action: Start +Result: Resource started successfully +New State: Running +``` + +### Stop Action + +**Purpose**: Force power off a running VM or container + +**When Available**: + +- Resource state is "running" +- You have `lifecycle:stop` permission + +**How to Use**: + +1. Verify resource is running (green status indicator) +2. Click the **"Stop"** button +3. Wait for operation to complete (typically 5-15 seconds) +4. Success notification appears +5. Resource status updates to "stopped" + +**What Happens**: + +- VM/container is immediately powered off +- Similar to pulling the power plug +- No graceful shutdown +- May cause data loss if not saved + +**Warning**: This is a forced stop. Use "Shutdown" for graceful shutdown. + +**Use Cases**: + +- Emergency stop +- Unresponsive VM/container +- When graceful shutdown fails + +**Example**: + +``` +Resource: app-server-02 +Current State: Running +Action: Stop +Result: Resource stopped successfully +New State: Stopped +``` + +### Shutdown Action + +**Purpose**: Gracefully shut down a running VM or container + +**When Available**: + +- Resource state is "running" +- You have `lifecycle:shutdown` permission + +**How to Use**: + +1. Verify resource is running (green status indicator) +2. Click the **"Shutdown"** button +3. Wait for operation to complete (typically 30-120 seconds) +4. Success notification appears +5. 
Resource status updates to "stopped" + +**What Happens**: + +- Shutdown signal sent to guest OS +- Operating system performs graceful shutdown +- Services stop cleanly +- Data is saved +- VM/container powers off + +**Advantages**: + +- Safe shutdown process +- No data loss +- Services stop cleanly +- Recommended method + +**Use Cases**: + +- Normal shutdown operations +- Maintenance preparation +- Before taking snapshots +- Planned downtime + +**Example**: + +``` +Resource: database-prod +Current State: Running +Action: Shutdown +Result: Resource shutdown successfully +New State: Stopped +Duration: 45 seconds +``` + +### Reboot Action + +**Purpose**: Restart a running VM or container + +**When Available**: + +- Resource state is "running" +- You have `lifecycle:reboot` permission + +**How to Use**: + +1. Verify resource is running (green status indicator) +2. Click the **"Reboot"** button +3. Wait for operation to complete (typically 30-90 seconds) +4. Success notification appears +5. Resource status remains "running" (after reboot) + +**What Happens**: + +- Reboot signal sent to guest OS +- Operating system performs graceful reboot +- Services restart +- VM/container comes back online + +**Use Cases**: + +- Applying system updates +- Clearing memory issues +- Restarting services +- Configuration changes + +**Example**: + +``` +Resource: web-server-03 +Current State: Running +Action: Reboot +Result: Resource rebooted successfully +New State: Running +Duration: 60 seconds +``` + +### Suspend Action + +**Purpose**: Suspend a running VM (save state to disk) + +**When Available**: + +- Resource type is VM (not available for LXC) +- Resource state is "running" +- You have `lifecycle:suspend` permission + +**How to Use**: + +1. Verify resource is a VM and running +2. Click the **"Suspend"** button +3. Wait for operation to complete (typically 10-60 seconds) +4. Success notification appears +5. 
Resource status updates to "suspended" + +**What Happens**: + +- VM state saved to disk +- Memory contents written to storage +- VM powered off +- Can be resumed later with exact state + +**Advantages**: + +- Faster than shutdown/start +- Preserves exact state +- No boot time when resuming +- Applications remain open + +**Use Cases**: + +- Temporary pause +- Saving work state +- Quick maintenance +- Resource conservation + +**Example**: + +``` +Resource: dev-workstation +Current State: Running +Action: Suspend +Result: VM suspended successfully +New State: Suspended +Duration: 25 seconds +``` + +### Resume Action + +**Purpose**: Resume a suspended VM + +**When Available**: + +- Resource type is VM +- Resource state is "suspended" +- You have `lifecycle:resume` permission + +**How to Use**: + +1. Verify resource is suspended (yellow status indicator) +2. Click the **"Resume"** button +3. Wait for operation to complete (typically 5-30 seconds) +4. Success notification appears +5. Resource status updates to "running" + +**What Happens**: + +- VM state restored from disk +- Memory contents loaded +- VM resumes exactly where it left off +- Applications continue running + +**Advantages**: + +- Instant resume +- No boot process +- Applications remain open +- Work state preserved + +**Use Cases**: + +- Resuming after suspend +- Quick return to work +- Continuing interrupted tasks + +**Example**: + +``` +Resource: dev-workstation +Current State: Suspended +Action: Resume +Result: VM resumed successfully +New State: Running +Duration: 15 seconds +``` + +### Destroy Action + +**Purpose**: Permanently delete a VM or container + +**When Available**: + +- Any resource state +- You have `lifecycle:destroy` permission + +**How to Use**: + +1. Click the **"Destroy"** button +2. **Confirmation dialog appears**: + - Shows resource name and ID + - Warns about permanent deletion + - Requires explicit confirmation +3. Review the warning carefully +4. 
Click **"Confirm"** to proceed or **"Cancel"** to abort +5. If confirmed, wait for operation to complete +6. Success notification appears +7. Redirected away from node detail page + +**What Happens**: + +- VM/container is stopped (if running) +- All data is deleted +- Disk images removed +- Configuration deleted +- Resource removed from inventory + +**Warning**: This action is permanent and cannot be undone! + +**Use Cases**: + +- Decommissioning resources +- Cleaning up test environments +- Removing failed deployments +- Freeing up resources + +**Safety Features**: + +- Confirmation dialog required +- Resource name displayed for verification +- Cannot be performed accidentally +- Logged for audit purposes + +**Example**: + +``` +Resource: test-vm-temp +Current State: Stopped +Action: Destroy +Confirmation: "Are you sure you want to destroy test-vm-temp (ID: 105)?" +User Action: Confirm +Result: Resource destroyed successfully +``` + +## Action Availability + +### State-Based Availability + +Actions are only available when the resource is in an appropriate state: + +**When Stopped**: + +- ✓ Start +- ✗ Stop +- ✗ Shutdown +- ✗ Reboot +- ✗ Suspend +- ✗ Resume +- ✓ Destroy + +**When Running**: + +- ✗ Start +- ✓ Stop +- ✓ Shutdown +- ✓ Reboot +- ✓ Suspend (VMs only) +- ✗ Resume +- ✓ Destroy + +**When Suspended**: + +- ✗ Start +- ✗ Stop +- ✗ Shutdown +- ✗ Reboot +- ✗ Suspend +- ✓ Resume +- ✓ Destroy + +### Permission-Based Availability + +Actions are only visible if you have the required permission: + +**Required Permissions**: + +- Start: `lifecycle:start` +- Stop: `lifecycle:stop` +- Shutdown: `lifecycle:shutdown` +- Reboot: `lifecycle:reboot` +- Suspend: `lifecycle:suspend` +- Resume: `lifecycle:resume` +- Destroy: `lifecycle:destroy` + +**Permission Wildcards**: + +- `lifecycle:*` - All lifecycle actions +- `*:lifecycle:*` - All lifecycle actions on all integrations + +### Resource Type Restrictions + +Some actions are only available for specific resource types: + 
+**VM Only**: + +- Suspend +- Resume + +**Both VM and LXC**: + +- Start +- Stop +- Shutdown +- Reboot +- Destroy + +### Integration-Specific Actions + +Different integrations may support different actions: + +**Proxmox**: + +- All actions supported + +**Future Integrations**: + +- EC2: Start, Stop, Reboot, Terminate +- Azure: Start, Stop, Restart, Delete +- May have integration-specific actions + +## Destructive Actions + +### Understanding Destructive Actions + +**Destructive Actions** are operations that permanently delete data or resources: + +- **Destroy**: Permanently deletes VM/container + +**Non-Destructive Actions**: + +- Start, Stop, Shutdown, Reboot, Suspend, Resume + +### Safety Mechanisms + +**Confirmation Dialogs**: + +- Required for all destructive actions +- Display resource name and ID +- Show warning message +- Require explicit confirmation +- Cannot be bypassed + +**Visual Indicators**: + +- Destroy button styled differently (red) +- Warning icons displayed +- Confirmation dialog uses warning colors + +**Audit Logging**: + +- All destructive actions logged +- User, timestamp, and resource recorded +- Available for compliance and auditing + +### Best Practices for Destructive Actions + +**Before Destroying**: + +1. **Verify Resource**: + - Confirm you have the correct resource + - Check resource name and ID + - Review resource details + +2. **Backup Data**: + - Take snapshots if needed + - Backup important data + - Export configurations + +3. **Check Dependencies**: + - Verify no other resources depend on this one + - Check for network dependencies + - Review application dependencies + +4. **Communicate**: + - Notify team members + - Update documentation + - Record the action + +**During Destruction**: + +1. Read confirmation dialog carefully +2. Verify resource name matches +3. Confirm you want to proceed +4. Wait for operation to complete +5. Don't interrupt the process + +**After Destruction**: + +1. Verify resource is removed +2. 
Check inventory is updated +3. Update documentation +4. Notify stakeholders + +## Monitoring Operations + +### Real-Time Feedback + +During lifecycle operations: + +1. **Loading Indicators**: + - All action buttons disabled + - Spinner or progress indicator appears + - Status shows "Operation in progress" + +2. **Status Updates**: + - Operation progress displayed + - Current step shown (if available) + - Estimated time remaining + +3. **Completion Notifications**: + - Success: Green toast notification + - Failure: Red toast notification with error + - Auto-dismiss (success) or manual dismiss (errors) + +### Status Refresh + +After operations complete: + +1. **Automatic Refresh**: + - Resource status automatically updates + - New state reflected in UI + - Available actions update + +2. **Manual Refresh**: + - Click refresh button if needed + - Reload page to force update + - Check execution history + +### Execution History + +View past operations: + +1. **On Node Detail Page**: + - Scroll to Execution History section + - View recent operations on this resource + - Filter by action type + +2. **On Executions Page**: + - Navigate to Executions from main menu + - Filter by node name + - View detailed execution logs + +## Best Practices + +### Planning Operations + +**Before Performing Actions**: + +1. **Verify Resource State**: + - Check current status + - Ensure resource is in expected state + - Review recent changes + +2. **Check Dependencies**: + - Identify dependent services + - Check for active connections + - Review application dependencies + +3. **Plan Timing**: + - Choose appropriate time window + - Consider user impact + - Schedule during maintenance windows + +4. **Communicate**: + - Notify affected users + - Update team members + - Document planned actions + +### Safe Operations + +**Operational Safety**: + +1. **Use Graceful Actions**: + - Prefer Shutdown over Stop + - Allow time for graceful shutdown + - Don't force stop unless necessary + +2. 
**Monitor Progress**: + - Watch for completion + - Check for errors + - Verify expected outcome + +3. **Verify Results**: + - Confirm resource is in expected state + - Test functionality after changes + - Check dependent services + +4. **Document Actions**: + - Record what was done + - Note any issues + - Update runbooks + +### Emergency Procedures + +**When Things Go Wrong**: + +1. **Unresponsive Resource**: + - Try graceful shutdown first + - Wait reasonable time + - Use force stop if necessary + - Document the issue + +2. **Failed Operations**: + - Review error message + - Check resource state + - Try again if appropriate + - Contact administrator if needed + +3. **Unexpected Behavior**: + - Don't panic + - Document what happened + - Check logs + - Seek help if needed + +## Troubleshooting + +### Problem: Manage Tab Not Visible + +**Symptoms**: + +- Manage tab missing from Node Detail page +- Cannot access lifecycle actions + +**Solutions**: + +1. **Check Permissions**: + - Verify you have lifecycle permissions + - Contact administrator for access + - Review your assigned roles + +2. **Check Resource Type**: + - Verify resource is managed by an integration + - Check integration is configured + - Ensure integration is connected + +3. **Refresh Page**: + - Reload the page + - Clear browser cache + - Log out and log back in + +### Problem: All Action Buttons Disabled + +**Symptoms**: + +- Action buttons appear but are grayed out +- Cannot click any actions +- No actions available message + +**Solutions**: + +1. **Check Resource State**: + - Verify resource state allows actions + - Example: Can't start a running VM + - Wait for current operation to complete + +2. **Check Permissions**: + - Verify you have required permissions + - Check specific action permissions + - Contact administrator if needed + +3. 
**Check Integration Health**: + - Verify integration is connected + - Test integration connectivity + - Check for integration errors + +### Problem: Action Fails with Error + +**Symptoms**: + +``` +Error: Failed to start VM +Error: Operation timeout +Error: Resource not found +``` + +**Solutions**: + +1. **Review Error Message**: + - Read error carefully + - Look for specific error codes + - Note any suggested actions + +2. **Check Resource State**: + - Verify resource exists + - Check resource is accessible + - Ensure resource is in expected state + +3. **Check Integration**: + - Verify integration is connected + - Test integration health + - Check integration logs + +4. **Retry Operation**: + - Wait a moment + - Try again + - Contact administrator if persists + +### Problem: Operation Hangs + +**Symptoms**: + +- Operation never completes +- Loading indicator stays forever +- No error or success message + +**Solutions**: + +1. **Wait Longer**: + - Some operations take time + - Shutdown can take 2-3 minutes + - Check resource directly if possible + +2. **Check Resource**: + - Navigate to integration directly (e.g., Proxmox web UI) + - Verify operation status + - Check for errors + +3. **Refresh Page**: + - Reload the page + - Check if operation completed + - Review execution history + +4. **Contact Administrator**: + - Report the issue + - Provide operation details + - Include error messages + +### Problem: Destroy Confirmation Not Appearing + +**Symptoms**: + +- Clicked Destroy but nothing happens +- No confirmation dialog +- Action seems to do nothing + +**Solutions**: + +1. **Check Browser**: + - Disable popup blockers + - Allow dialogs from Pabawi + - Try different browser + +2. **Check JavaScript**: + - Ensure JavaScript is enabled + - Check browser console for errors + - Clear browser cache + +3. 
**Refresh Page**: + - Reload the page + - Try action again + - Log out and log back in + +## Related Documentation + +- [Provisioning Guide](provisioning-guide.md) - How to create VMs and containers +- [Permissions and RBAC](permissions-rbac.md) - Permission requirements +- [Proxmox Integration](integrations/proxmox.md) - Proxmox-specific details +- [Troubleshooting Guide](troubleshooting.md) - General troubleshooting + +## Support + +For additional help: + +- **Documentation**: [pabawi.dev/docs](https://pabawi.dev/docs) +- **GitHub Issues**: [pabawi/issues](https://github.com/pabawi/pabawi/issues) +- **Administrator**: Contact your Pabawi administrator for assistance diff --git a/docs/permissions-rbac.md b/docs/permissions-rbac.md new file mode 100644 index 00000000..48ccc6b8 --- /dev/null +++ b/docs/permissions-rbac.md @@ -0,0 +1,736 @@ +# Permissions and RBAC Guide + +## Overview + +Pabawi implements Role-Based Access Control (RBAC) to manage user permissions for provisioning and infrastructure management operations. This guide explains the permission system, required permissions for each action, and how permissions affect the user interface. + +## Table of Contents + +- [Understanding RBAC](#understanding-rbac) +- [Permission Levels](#permission-levels) +- [Provisioning Permissions](#provisioning-permissions) +- [Management Permissions](#management-permissions) +- [UI Visibility Rules](#ui-visibility-rules) +- [Permission Enforcement](#permission-enforcement) +- [Configuring Permissions](#configuring-permissions) +- [Troubleshooting](#troubleshooting) + +## Understanding RBAC + +### What is RBAC? + +Role-Based Access Control (RBAC) is a security model that restricts system access based on user roles. 
In Pabawi: + +- **Users** are assigned **Roles** +- **Roles** contain **Permissions** +- **Permissions** grant access to specific **Actions** + +### Key Concepts + +**User**: + +- Individual account in Pabawi +- Can have one or more roles +- Inherits permissions from all assigned roles + +**Role**: + +- Named collection of permissions +- Examples: Administrator, Operator, Viewer +- Can be assigned to multiple users + +**Permission**: + +- Specific authorization to perform an action +- Examples: `provision:create_vm`, `lifecycle:start`, `lifecycle:destroy` +- Granular control over operations + +**Action**: + +- Specific operation in the system +- Examples: Create VM, Start VM, View Inventory +- Requires corresponding permission + +### Permission Model + +Pabawi uses a hierarchical permission model: + +``` +Integration Level + └─ Action Type + └─ Specific Action + └─ Resource Type (optional) +``` + +Examples: + +- `proxmox:provision:create_vm` - Create VMs via Proxmox +- `proxmox:lifecycle:start` - Start VMs/containers +- `proxmox:lifecycle:destroy` - Destroy VMs/containers +- `*:provision:*` - All provisioning actions on all integrations +- `*:*:*` - All actions (administrator) + +## Permission Levels + +### Administrator + +**Full Access**: All permissions across all integrations + +**Permissions**: + +- `*:*:*` (wildcard - all actions) + +**Capabilities**: + +- Create, modify, and delete VMs and containers +- Start, stop, and manage all resources +- Configure integrations +- Manage user permissions +- Access all features + +**UI Access**: + +- All menu items visible +- All actions available +- No restrictions + +### Operator + +**Operational Access**: Provision and manage resources + +**Permissions**: + +- `*:provision:*` - All provisioning actions +- `*:lifecycle:start`, `*:lifecycle:stop`, `*:lifecycle:shutdown`, `*:lifecycle:reboot`, `*:lifecycle:suspend`, `*:lifecycle:resume` - All lifecycle actions except `destroy` +- `*:inventory:read` - View inventory +- `*:facts:read` - View facts + +**Capabilities**: + +- Create VMs and containers +- Start, stop, reboot resources +- View inventory and facts +- Cannot destroy resources +- Cannot 
configure integrations + +**UI Access**: + +- Provision menu visible +- Manage tab visible (limited actions) +- Setup menu hidden + +### Viewer + +**Read-Only Access**: View resources only + +**Permissions**: + +- `*:inventory:read` - View inventory +- `*:facts:read` - View facts + +**Capabilities**: + +- View inventory +- View node details +- View facts +- Cannot modify anything + +**UI Access**: + +- Inventory menu visible +- Node detail pages visible (read-only) +- Provision menu hidden +- Manage tab hidden + +### Custom Roles + +Organizations can create custom roles with specific permission combinations: + +**Example: VM Manager**: + +- `proxmox:provision:create_vm` - Create VMs only +- `proxmox:lifecycle:start` - Start VMs +- `proxmox:lifecycle:stop` - Stop VMs +- `proxmox:lifecycle:reboot` - Reboot VMs + +**Example: Container Manager**: + +- `proxmox:provision:create_lxc` - Create containers only +- `proxmox:lifecycle:*` - All lifecycle actions for containers + +**Example: Development Team**: + +- `proxmox:provision:*` - All provisioning (dev environment only) +- `proxmox:lifecycle:*` - All lifecycle actions +- `proxmox:lifecycle:destroy` - Can destroy resources + +## Provisioning Permissions + +### Create VM Permission + +**Permission**: `<integration>:provision:create_vm` + +**Grants Access To**: + +- Provision page (VM tab) +- VM creation form +- Submit VM creation requests + +**Required For**: + +- Creating new virtual machines +- Accessing VM provisioning interface + +**UI Impact**: + +- Provision menu item appears (if any provision permission exists) +- VM tab visible on Provision page +- VM creation form enabled + +**Example**: + +``` +proxmox:provision:create_vm +ec2:provision:create_vm +*:provision:create_vm (all integrations) +``` + +### Create LXC Permission + +**Permission**: `<integration>:provision:create_lxc` + +**Grants Access To**: + +- Provision page (LXC tab) +- LXC creation form +- Submit LXC creation requests + +**Required For**: + +- Creating new LXC containers 
+- Accessing LXC provisioning interface + +**UI Impact**: + +- Provision menu item appears (if any provision permission exists) +- LXC tab visible on Provision page +- LXC creation form enabled + +**Example**: + +``` +proxmox:provision:create_lxc +*:provision:create_lxc (all integrations) +``` + +### General Provisioning Permission + +**Permission**: `<integration>:provision:*` + +**Grants Access To**: + +- All provisioning actions for the integration +- Both VM and LXC creation +- Future provisioning capabilities + +**Required For**: + +- Full provisioning access +- Creating any resource type + +**UI Impact**: + +- Provision menu item appears +- All provisioning tabs visible +- All creation forms enabled + +**Example**: + +``` +proxmox:provision:* +*:provision:* (all integrations, all resource types) +``` + +## Management Permissions + +### Lifecycle Actions + +#### Start Permission + +**Permission**: `<integration>:lifecycle:start` + +**Grants Access To**: + +- Start button on Manage tab +- Start action for stopped VMs/containers + +**Required For**: + +- Starting stopped resources + +**UI Impact**: + +- Start button visible when resource is stopped +- Start action enabled in action menu + +#### Stop Permission + +**Permission**: `<integration>:lifecycle:stop` + +**Grants Access To**: + +- Stop button on Manage tab +- Force stop action for running VMs/containers + +**Required For**: + +- Stopping running resources (forced) + +**UI Impact**: + +- Stop button visible when resource is running +- Stop action enabled in action menu + +#### Shutdown Permission + +**Permission**: `<integration>:lifecycle:shutdown` + +**Grants Access To**: + +- Shutdown button on Manage tab +- Graceful shutdown action + +**Required For**: + +- Gracefully shutting down resources + +**UI Impact**: + +- Shutdown button visible when resource is running +- Shutdown action enabled in action menu + +#### Reboot Permission + +**Permission**: `<integration>:lifecycle:reboot` + +**Grants Access To**: + +- Reboot button on Manage tab +- Reboot action for running 
VMs/containers

**Required For**:

- Rebooting resources

**UI Impact**:

- Reboot button visible when resource is running
- Reboot action enabled in action menu

#### Suspend/Resume Permissions

**Permission**:

- `<integration>:lifecycle:suspend`
- `<integration>:lifecycle:resume`

**Grants Access To**:

- Suspend button (VMs only)
- Resume button (suspended VMs)

**Required For**:

- Suspending running VMs
- Resuming suspended VMs

**UI Impact**:

- Suspend button visible when VM is running
- Resume button visible when VM is suspended

#### Destroy Permission

**Permission**: `<integration>:lifecycle:destroy`

**Grants Access To**:

- Destroy button on Manage tab
- Delete VM/container action
- Confirmation dialog

**Required For**:

- Permanently deleting resources

**UI Impact**:

- Destroy button visible (with confirmation)
- Destroy action enabled in action menu
- Warning indicators shown

**Security Note**: This is a destructive action. Grant carefully.

### General Lifecycle Permission

**Permission**: `<integration>:lifecycle:*`

**Grants Access To**:

- All lifecycle actions for the integration
- Start, stop, shutdown, reboot, suspend, resume, destroy

**Required For**:

- Full lifecycle management access

**UI Impact**:

- Manage tab visible
- All action buttons visible (based on resource state)
- All lifecycle operations enabled

**Example**:

```
proxmox:lifecycle:*
*:lifecycle:* (all integrations)
```

## UI Visibility Rules

### Menu Items

**Provision Menu**:

- **Visible**: User has any `*:provision:*` permission
- **Hidden**: User has no provisioning permissions

**Inventory Menu**:

- **Visible**: User has `*:inventory:read` permission
- **Hidden**: User has no inventory read permission

**Setup Menu**:

- **Visible**: User has administrator role
- **Hidden**: Non-administrator users

### Page Elements

**Provision Page**:

- **VM Tab**: Visible if user has `*:provision:create_vm`
- **LXC Tab**: Visible 
if user has `*:provision:create_lxc` +- **Integration Selector**: Shows only integrations user can access + +**Node Detail Page**: + +- **Manage Tab**: Visible if user has any `*:lifecycle:*` permission +- **Facts Section**: Visible if user has `*:facts:read` permission +- **Configuration Section**: Always visible (read-only) + +**Manage Tab**: + +- **Action Buttons**: Only visible if user has corresponding permission +- **Destroy Button**: Only visible if user has `*:lifecycle:destroy` +- **No Actions Message**: Shown if user has no lifecycle permissions + +### Form Elements + +**Provisioning Forms**: + +- **Submit Button**: Enabled only if user has create permission +- **Form Fields**: All visible (validation applies) +- **Integration Dropdown**: Shows only permitted integrations + +**Action Buttons**: + +- **Enabled**: User has permission and resource state allows action +- **Disabled**: User lacks permission or resource state prevents action +- **Hidden**: User has no related permissions + +## Permission Enforcement + +### Frontend Enforcement + +**UI-Level Security**: + +- Menu items hidden based on permissions +- Buttons disabled or hidden +- Forms not rendered without permissions +- Provides user-friendly experience + +**Limitations**: + +- Not a security boundary +- Can be bypassed by API calls +- Relies on backend enforcement + +### Backend Enforcement + +**API-Level Security**: + +- All API endpoints check permissions +- Requests without permission return 403 Forbidden +- Cannot be bypassed +- True security boundary + +**Enforcement Points**: + +1. **Authentication**: Verify user is logged in +2. **Authorization**: Check user has required permission +3. **Resource Access**: Verify user can access specific resource +4. 
**Action Execution**: Validate permission before executing + +**Error Responses**: + +```json +{ + "error": { + "code": "PERMISSION_DENIED", + "message": "User does not have permission to perform this action", + "requiredPermission": "proxmox:provision:create_vm" + } +} +``` + +### Permission Checks + +**Before Every Action**: + +1. Extract user from authentication token +2. Load user's roles and permissions +3. Check if user has required permission +4. Allow or deny action +5. Log authorization decision + +**Permission Matching**: + +- Exact match: `proxmox:provision:create_vm` +- Wildcard integration: `*:provision:create_vm` +- Wildcard action: `proxmox:provision:*` +- Full wildcard: `*:*:*` + +## Configuring Permissions + +### User Management + +**Creating Users**: + +1. Navigate to Admin → Users +2. Click "Add User" +3. Enter user details +4. Assign roles +5. Save + +**Assigning Roles**: + +1. Navigate to user details +2. Click "Edit Roles" +3. Select roles from list +4. Save changes +5. User inherits all role permissions + +### Role Management + +**Creating Roles**: + +1. Navigate to Admin → Roles +2. Click "Add Role" +3. Enter role name and description +4. Select permissions +5. Save + +**Editing Roles**: + +1. Navigate to role details +2. Click "Edit Permissions" +3. Add or remove permissions +4. Save changes +5. 
All users with role get updated permissions

### Permission Syntax

**Format**: `<integration>:<action_type>:<specific_action>`

**Components**:

- **Integration**: `proxmox`, `ec2`, `azure`, or `*` (all)
- **Action Type**: `provision`, `lifecycle`, `inventory`, `facts`, or `*` (all)
- **Specific Action**: `create_vm`, `start`, `destroy`, or `*` (all)

**Examples**:

```
proxmox:provision:create_vm    # Specific permission
proxmox:provision:*            # All provisioning on Proxmox
*:provision:create_vm          # Create VMs on any integration
proxmox:lifecycle:*            # All lifecycle actions on Proxmox
*:*:*                          # All permissions (admin)
```

### Best Practices

**Principle of Least Privilege**:

- Grant minimum permissions needed
- Start with restrictive permissions
- Add permissions as needed
- Review permissions regularly

**Role Design**:

- Create roles for job functions
- Don't create user-specific roles
- Use descriptive role names
- Document role purposes

**Permission Auditing**:

- Review permissions quarterly
- Remove unused permissions
- Check for over-privileged users
- Log permission changes

**Separation of Duties**:

- Separate provisioning and destruction
- Different roles for different environments
- Require approval for sensitive actions

## Troubleshooting

### Problem: "Permission Denied" Error

**Symptoms**:

```json
{
  "error": {
    "code": "PERMISSION_DENIED",
    "message": "User does not have permission to perform this action"
  }
}
```

**Solutions**:

1. **Check User Roles**:
   - Navigate to user profile
   - Review assigned roles
   - Verify roles are active

2. **Check Role Permissions**:
   - Navigate to role details
   - Review permissions list
   - Verify required permission is included

3. **Check Permission Format**:
   - Verify permission syntax is correct
   - Check for typos
   - Ensure wildcards are used correctly

4. 
**Contact Administrator**: + - Request required permission + - Explain use case + - Provide error details + +### Problem: Menu Items Not Visible + +**Symptoms**: + +- Provision menu missing +- Manage tab not showing +- Expected features hidden + +**Solutions**: + +1. **Verify Permissions**: + - Check user has required permissions + - Review role assignments + - Confirm permissions are active + +2. **Check Integration Status**: + - Verify integration is enabled + - Check integration is connected + - Test integration health + +3. **Clear Browser Cache**: + - Clear browser cache and cookies + - Refresh the page + - Log out and log back in + +4. **Check Permission Propagation**: + - Permissions may take time to propagate + - Wait a few minutes + - Refresh the page + +### Problem: Action Buttons Disabled + +**Symptoms**: + +- Buttons appear but are disabled +- Cannot click action buttons +- Grayed out controls + +**Solutions**: + +1. **Check Resource State**: + - Verify resource is in correct state for action + - Example: Can't start a running VM + - Check resource status + +2. **Check Permissions**: + - Verify user has permission for specific action + - Check role includes required permission + - Review permission wildcards + +3. **Check Integration Health**: + - Verify integration is connected + - Test integration connectivity + - Check for integration errors + +### Problem: Inconsistent Permissions + +**Symptoms**: + +- Permissions work in some places but not others +- Inconsistent UI behavior +- Some actions allowed, others denied + +**Solutions**: + +1. **Check Permission Wildcards**: + - Verify wildcard usage is correct + - Check for conflicting permissions + - Review permission hierarchy + +2. **Check Multiple Roles**: + - User may have multiple roles + - Permissions are combined + - Check all assigned roles + +3. **Check Backend Logs**: + - Review authorization logs + - Look for permission check failures + - Identify specific permission issues + +4. 
**Verify Permission Sync**: + - Ensure frontend and backend are in sync + - Check for caching issues + - Restart services if needed + +## Related Documentation + +- [Provisioning Guide](provisioning-guide.md) - How to use provisioning features +- [Manage Tab Guide](manage-tab-guide.md) - How to manage resources +- [Proxmox Setup Guide](proxmox-setup-guide.md) - Configure Proxmox integration +- [User Guide](user-guide.md) - General Pabawi usage + +## Support + +For additional help: + +- **Documentation**: [pabawi.dev/docs](https://pabawi.dev/docs) +- **GitHub Issues**: [pabawi/issues](https://github.com/pabawi/pabawi/issues) +- **Administrator**: Contact your Pabawi administrator for permission requests diff --git a/docs/provisioning-guide.md b/docs/provisioning-guide.md new file mode 100644 index 00000000..84a434b1 --- /dev/null +++ b/docs/provisioning-guide.md @@ -0,0 +1,622 @@ +# Provisioning Guide + +## Overview + +The Provisioning page in Pabawi allows you to create new virtual machines (VMs) and Linux containers (LXC) through integrated provisioning systems. This guide covers how to access and use the provisioning interface to deploy new infrastructure resources. 
+ +## Table of Contents + +- [Accessing the Provision Page](#accessing-the-provision-page) +- [Understanding the Interface](#understanding-the-interface) +- [Creating Virtual Machines](#creating-virtual-machines) +- [Creating LXC Containers](#creating-lxc-containers) +- [Monitoring Provisioning Operations](#monitoring-provisioning-operations) +- [Best Practices](#best-practices) +- [Troubleshooting](#troubleshooting) + +## Accessing the Provision Page + +### Prerequisites + +Before you can access the Provision page, you need: + +- **Provisioning Permissions**: Your user account must have provisioning permissions +- **Configured Integration**: At least one provisioning integration (e.g., Proxmox) must be configured and connected +- **Available Resources**: The target infrastructure must have available resources (CPU, memory, storage) + +### Navigation + +1. **From the Main Menu**: + - Look for the **"Provision"** menu item in the top navigation bar + - Click on "Provision" to access the provisioning interface + - If you don't see this menu item, you may lack provisioning permissions + +2. **Permission-Based Access**: + - The Provision menu item only appears for users with provisioning permissions + - Contact your administrator if you need access + +## Understanding the Interface + +### Page Layout + +The Provision page consists of several key sections: + +1. **Integration Selector** (if multiple integrations available): + - Dropdown or tabs to switch between different provisioning systems + - Shows available integrations (Proxmox, EC2, Azure, etc.) + - Displays connection status for each integration + +2. **Provisioning Forms**: + - Tabbed interface for different resource types (VM, LXC) + - Form fields for configuration parameters + - Real-time validation feedback + - Submit button (enabled when form is valid) + +3. 
**Status Indicators**: + - Loading indicators during operations + - Success/error notifications + - Progress feedback + +### Available Integrations + +The page automatically discovers and displays available provisioning integrations: + +- **Proxmox**: Create VMs and LXC containers on Proxmox Virtual Environment +- **EC2** (future): Create AWS EC2 instances +- **Azure** (future): Create Azure virtual machines +- **Terraform** (future): Deploy infrastructure as code + +Only integrations that are configured and connected will appear. + +## Creating Virtual Machines + +### VM Creation Workflow + +#### Step 1: Select VM Tab + +1. Navigate to the Provision page +2. If multiple integrations are available, select your target integration (e.g., Proxmox) +3. Click on the **"VM"** tab to access the VM creation form + +#### Step 2: Configure Required Parameters + +**VM ID (Required)**: + +- Unique identifier for the VM +- Must be between 100 and 999999999 +- Cannot conflict with existing VMs +- Example: `100`, `1001`, `5000` + +**VM Name (Required)**: + +- Descriptive name for the VM +- Used for identification and management +- Should be meaningful and follow your naming convention +- Example: `web-server-01`, `database-prod`, `app-staging` + +**Target Node (Required)**: + +- Proxmox node where the VM will be created +- Select from available nodes in your cluster +- Consider resource availability and location +- Example: `pve1`, `node-01`, `proxmox-host` + +#### Step 3: Configure Optional Parameters + +**CPU Configuration**: + +- **Cores**: Number of CPU cores (default: 1) + - Range: 1-128 (depending on host) + - Example: `2`, `4`, `8` +- **Sockets**: Number of CPU sockets (default: 1) + - Usually 1 for most workloads + - Example: `1`, `2` +- **CPU Type**: CPU model to emulate + - Options: `host`, `kvm64`, `qemu64` + - `host` provides best performance + - Example: `host` + +**Memory Configuration**: + +- **Memory**: RAM in megabytes (default: 512) + - Minimum: 512 MB + - 
Example: `2048` (2 GB), `4096` (4 GB), `8192` (8 GB) + +**Storage Configuration**: + +- **Disk (scsi0)**: Primary disk configuration + - Format: `storage:size` + - Example: `local-lvm:32` (32 GB on local-lvm storage) + - Example: `ceph-pool:100` (100 GB on Ceph storage) + +- **CD/DVD (ide2)**: ISO image for installation + - Format: `storage:iso/image.iso` + - Example: `local:iso/ubuntu-22.04-server.iso` + - Leave empty if not needed + +**Network Configuration**: + +- **Network (net0)**: Network interface configuration + - Format: `model=virtio,bridge=vmbr0` + - Example: `model=virtio,bridge=vmbr0,firewall=1` + - Common models: `virtio`, `e1000`, `rtl8139` + +**Operating System**: + +- **OS Type**: Operating system type + - Options: `l26` (Linux 2.6+), `win10`, `win11`, `other` + - Helps Proxmox optimize settings + - Example: `l26` + +#### Step 4: Review and Submit + +1. **Validate Configuration**: + - Check all required fields are filled + - Verify values are within acceptable ranges + - Review validation messages (if any) + +2. **Submit Creation Request**: + - Click the **"Create VM"** button + - A loading indicator appears + - Wait for the operation to complete + +3. 
**Review Results**: + - Success: Green notification with VM ID and details + - Failure: Red notification with error message + - Task ID for tracking the operation + +### VM Creation Examples + +**Example 1: Basic Web Server** + +``` +VM ID: 100 +Name: web-server-01 +Node: pve1 +Cores: 2 +Memory: 2048 +Disk: local-lvm:32 +Network: model=virtio,bridge=vmbr0 +OS Type: l26 +``` + +**Example 2: Database Server** + +``` +VM ID: 200 +Name: postgres-prod +Node: pve2 +Cores: 4 +Memory: 8192 +Sockets: 1 +CPU: host +Disk: ceph-pool:100 +Network: model=virtio,bridge=vmbr0,firewall=1 +OS Type: l26 +``` + +**Example 3: Windows Desktop** + +``` +VM ID: 300 +Name: win11-desktop +Node: pve1 +Cores: 4 +Memory: 8192 +Disk: local-lvm:64 +CD/DVD: local:iso/windows11.iso +Network: model=e1000,bridge=vmbr0 +OS Type: win11 +``` + +## Creating LXC Containers + +### LXC Creation Workflow + +#### Step 1: Select LXC Tab + +1. Navigate to the Provision page +2. Select your target integration (e.g., Proxmox) +3. Click on the **"LXC"** tab to access the container creation form + +#### Step 2: Configure Required Parameters + +**Container ID (Required)**: + +- Unique identifier for the container +- Must be between 100 and 999999999 +- Cannot conflict with existing containers or VMs +- Example: `101`, `1002`, `5001` + +**Hostname (Required)**: + +- Container hostname +- Must be lowercase alphanumeric with hyphens +- Used for network identification +- Example: `web-container`, `app-01`, `cache-server` + +**Target Node (Required)**: + +- Proxmox node where the container will be created +- Select from available nodes in your cluster +- Example: `pve1`, `node-01` + +**OS Template (Required)**: + +- Container template to use +- Format: `storage:vztmpl/template-name.tar.zst` +- Must exist on the target node +- Example: `local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst` +- Example: `local:vztmpl/debian-11-standard_11.7-1_amd64.tar.zst` + +#### Step 3: Configure Optional Parameters + +**CPU 
Configuration**: + +- **Cores**: Number of CPU cores (default: 1) + - Range: 1-128 + - Example: `1`, `2`, `4` + +**Memory Configuration**: + +- **Memory**: RAM in megabytes (default: 512) + - Minimum: 512 MB + - Example: `512`, `1024`, `2048` + +**Storage Configuration**: + +- **Root Filesystem (rootfs)**: Root filesystem size + - Format: `storage:size` + - Example: `local-lvm:8` (8 GB) + - Example: `ceph-pool:16` (16 GB) + +**Network Configuration**: + +- **Network (net0)**: Network interface configuration + - Format: `name=eth0,bridge=vmbr0,ip=dhcp` + - Example: `name=eth0,bridge=vmbr0,ip=192.168.1.100/24,gw=192.168.1.1` + - Use `ip=dhcp` for automatic IP assignment + +**Security**: + +- **Root Password**: Root password for the container + - Optional but recommended + - Use a strong password + - Store securely + +#### Step 4: Review and Submit + +1. **Validate Configuration**: + - Check all required fields are filled + - Verify hostname format is correct + - Ensure template exists on target node + +2. **Submit Creation Request**: + - Click the **"Create LXC"** button + - A loading indicator appears + - Wait for the operation to complete + +3. 
**Review Results**: + - Success: Green notification with container ID + - Failure: Red notification with error message + - Task ID for tracking + +### LXC Creation Examples + +**Example 1: Basic Web Container** + +``` +Container ID: 101 +Hostname: web-container-01 +Node: pve1 +Template: local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst +Cores: 1 +Memory: 1024 +Root FS: local-lvm:8 +Network: name=eth0,bridge=vmbr0,ip=dhcp +``` + +**Example 2: Application Container with Static IP** + +``` +Container ID: 102 +Hostname: app-backend +Node: pve2 +Template: local:vztmpl/debian-11-standard_11.7-1_amd64.tar.zst +Cores: 2 +Memory: 2048 +Root FS: ceph-pool:16 +Network: name=eth0,bridge=vmbr0,ip=192.168.1.50/24,gw=192.168.1.1 +Password: (set securely) +``` + +**Example 3: Development Container** + +``` +Container ID: 103 +Hostname: dev-env +Node: pve1 +Template: local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst +Cores: 2 +Memory: 2048 +Root FS: local-lvm:16 +Network: name=eth0,bridge=vmbr0,ip=dhcp +``` + +## Monitoring Provisioning Operations + +### Real-Time Feedback + +During provisioning operations: + +1. **Loading Indicators**: + - Submit button becomes disabled + - Spinner or progress indicator appears + - Form fields are locked + +2. **Status Updates**: + - Operation progress displayed + - Estimated time remaining (if available) + - Current step in the process + +3. **Completion Notifications**: + - Success: Green toast notification with details + - Failure: Red toast notification with error + - Auto-dismiss after 5 seconds (success) or manual dismiss (errors) + +### Viewing Created Resources + +After successful provisioning: + +1. **Navigate to Inventory**: + - Click "Inventory" in the main menu + - New VM or container appears in the list + - May take a few moments to sync + +2. **Access Node Detail Page**: + - Click on the newly created resource + - View configuration and status + - Access management actions + +3. 
**Check Execution History**: + - View provisioning operation in execution history + - Review operation details and output + - Track task completion + +## Best Practices + +### Planning + +**Before Creating Resources**: + +1. **Plan Resource Allocation**: + - Determine CPU, memory, and storage requirements + - Check available resources on target nodes + - Consider future growth and scaling + +2. **Choose Appropriate IDs**: + - Use a consistent numbering scheme + - Document ID ranges for different purposes + - Example: 100-199 for web servers, 200-299 for databases + +3. **Follow Naming Conventions**: + - Use descriptive, meaningful names + - Include environment indicators (prod, dev, staging) + - Example: `web-prod-01`, `db-staging-02` + +4. **Select Appropriate Templates**: + - Use official, up-to-date templates + - Verify template compatibility with your needs + - Test templates in development first + +### Security + +**Secure Configuration**: + +1. **Disable Destructive Actions in Production**: + - Set `ALLOW_DESTRUCTIVE_PROVISIONING=false` to prevent VM/container destruction + - This blocks Proxmox destroy and AWS terminate across all integrations + - Non-destructive lifecycle actions (start, stop, reboot) remain available + +2. **Use Strong Passwords**: + - Generate random, complex passwords + - Store passwords in a password manager + - Never hardcode passwords in scripts + +3. **Network Segmentation**: + - Place resources in appropriate network segments + - Use firewalls to restrict access + - Configure security groups properly + +4. **Minimal Permissions**: + - Grant only necessary permissions + - Use separate accounts for different purposes + - Audit permission usage regularly + +### Resource Management + +**Efficient Resource Usage**: + +1. **Right-Size Resources**: + - Don't over-provision CPU and memory + - Start small and scale up as needed + - Monitor resource utilization + +2. 
**Storage Planning**: + - Allocate appropriate disk space + - Use thin provisioning when possible + - Plan for backups and snapshots + +3. **Network Configuration**: + - Use DHCP for dynamic environments + - Use static IPs for servers + - Document IP allocations + +### Testing + +**Test Before Production**: + +1. **Development Environment**: + - Test provisioning in development first + - Verify configurations work as expected + - Document successful configurations + +2. **Validation**: + - Test VM/container starts successfully + - Verify network connectivity + - Check resource allocation + +3. **Documentation**: + - Document provisioning procedures + - Keep configuration templates + - Maintain inventory records + +## Troubleshooting + +### Common Issues + +#### Problem: "VMID already exists" + +**Symptoms**: + +``` +Error: VM with VMID 100 already exists on node pve1 +``` + +**Solutions**: + +1. Choose a different VMID +2. Check existing VMs: Navigate to Inventory and search +3. If the VM should be removed, delete it first via the Manage tab + +#### Problem: "Insufficient resources" + +**Symptoms**: + +``` +Error: Not enough memory available on node +Error: Storage full +``` + +**Solutions**: + +1. Check available resources on the target node +2. Choose a node with more available resources +3. Reduce resource allocation (CPU, memory, disk) +4. Clean up unused VMs or containers + +#### Problem: "Template not found" + +**Symptoms**: + +``` +Error: Template 'local:vztmpl/ubuntu-22.04.tar.zst' not found +``` + +**Solutions**: + +1. Verify the template name is correct +2. Check templates are downloaded on the target node +3. Download missing templates via Proxmox web interface +4. Use a different template that exists + +#### Problem: "Invalid hostname format" + +**Symptoms**: + +``` +Error: Hostname must contain only lowercase letters, numbers, and hyphens +``` + +**Solutions**: + +1. Use only lowercase letters (a-z) +2. Use numbers (0-9) +3. 
Use hyphens (-) but not at start or end +4. No underscores, spaces, or special characters +5. Example: `web-server-01` ✓, `Web_Server_01` ✗ + +#### Problem: "Network configuration error" + +**Symptoms**: + +``` +Error: Invalid network configuration +Error: Bridge 'vmbr1' does not exist +``` + +**Solutions**: + +1. Verify bridge name exists on target node +2. Check network configuration syntax +3. Use correct format: `model=virtio,bridge=vmbr0` +4. Consult Proxmox documentation for network options + +#### Problem: "Permission denied" + +**Symptoms**: + +``` +Error: User does not have permission to create VMs +Error: Insufficient privileges +``` + +**Solutions**: + +1. Contact your administrator for provisioning permissions +2. Verify your user account has the correct role +3. Check integration permissions are configured correctly + +#### Problem: "Operation timeout" + +**Symptoms**: + +``` +Error: Provisioning operation timed out +``` + +**Solutions**: + +1. Check target node is responsive +2. Verify network connectivity to Proxmox +3. Try again - the node may have been busy +4. Contact administrator if problem persists + +### Getting Help + +If you encounter issues not covered here: + +1. **Check Integration Status**: + - Navigate to Setup page + - Verify integration is connected + - Test connection + +2. **Review Error Messages**: + - Read error messages carefully + - Look for specific error codes + - Note any suggested actions + +3. **Check Logs**: + - Enable Expert Mode for detailed errors + - Review execution history + - Check Proxmox logs on the server + +4. 
**Contact Support**: + - Provide error messages + - Include configuration details (without sensitive data) + - Describe steps to reproduce + +## Related Documentation + +- [Proxmox Integration Setup](integrations/proxmox.md) - Configure Proxmox integration +- [Manage Tab Guide](manage-tab-guide.md) - Manage VM and container lifecycle +- [Permissions and RBAC](permissions-rbac.md) - Understand permission requirements +- [Troubleshooting Guide](troubleshooting.md) - General troubleshooting + +## Support + +For additional help: + +- **Documentation**: [pabawi.dev/docs](https://pabawi.dev/docs) +- **GitHub Issues**: [pabawi/issues](https://github.com/pabawi/pabawi/issues) +- **Proxmox Documentation**: [pve.proxmox.com/wiki](https://pve.proxmox.com/wiki) diff --git a/docs/proxmox-setup-guide.md b/docs/proxmox-setup-guide.md new file mode 100644 index 00000000..3807f1ce --- /dev/null +++ b/docs/proxmox-setup-guide.md @@ -0,0 +1,737 @@ +# Proxmox Integration Setup Guide + +## Overview + +This guide walks you through configuring the Proxmox integration in Pabawi, enabling you to manage Proxmox Virtual Environment infrastructure through the web interface. The integration provides inventory discovery, lifecycle management, and provisioning capabilities for VMs and LXC containers. 
+ +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Configuration Methods](#configuration-methods) +- [Web Interface Setup](#web-interface-setup) +- [Environment Variable Setup](#environment-variable-setup) +- [Authentication Options](#authentication-options) +- [Testing the Connection](#testing-the-connection) +- [Troubleshooting](#troubleshooting) +- [Security Best Practices](#security-best-practices) + +## Prerequisites + +Before configuring the Proxmox integration, ensure you have: + +### Proxmox Requirements + +- **Proxmox VE**: Version 6.x or 7.x installed and running +- **API Access**: Proxmox API enabled (default on port 8006) +- **Network Access**: Pabawi server can reach Proxmox on port 8006 +- **Credentials**: Either API token or username/password for authentication + +### Pabawi Requirements + +- **Administrator Access**: You need administrator permissions in Pabawi +- **Integration Permissions**: Permission to configure integrations +- **Network Connectivity**: Pabawi server can reach Proxmox server + +### Proxmox Permissions + +The Proxmox user or token needs these permissions: + +- `VM.Allocate` - Create VMs and containers +- `VM.Config.*` - Configure VMs and containers +- `VM.PowerMgmt` - Start, stop, and manage power state +- `VM.Audit` - Read VM information +- `Datastore.Allocate` - Allocate disk space + +## Configuration Methods + +You can configure the Proxmox integration using two methods: + +1. **Web Interface** (Recommended): User-friendly form with validation and connection testing +2. **Environment Variables**: Configuration file for automated deployments + +Both methods achieve the same result. Choose based on your preference and deployment method. + +## Web Interface Setup + +### Step 1: Access Integration Setup + +1. **Log in to Pabawi**: + - Open your web browser + - Navigate to your Pabawi URL + - Log in with administrator credentials + +2. 
**Navigate to Setup Page**: + - Click on **"Setup"** or **"Integrations"** in the main menu + - Look for the **"Proxmox"** section + - Click to expand the Proxmox configuration form + +### Step 2: Configure Connection Settings + +**Host (Required)**: + +- Enter the Proxmox server hostname or IP address +- Do not include `https://` or port number +- Examples: + - `proxmox.example.com` + - `192.168.1.100` + - `pve.local` + +**Port (Required)**: + +- Default: `8006` +- Only change if you've customized Proxmox API port +- Must be between 1 and 65535 + +### Step 3: Choose Authentication Method + +You have two authentication options: + +#### Option A: API Token Authentication (Recommended) + +**Token (Required for this method)**: + +- Format: `user@realm!tokenid=uuid` +- Example: `automation@pve!api-token=12345678-1234-1234-1234-123456789abc` +- See [Creating an API Token](#creating-an-api-token) below + +**Advantages**: + +- More secure than password authentication +- Fine-grained permission control +- No password expiration issues +- Can be easily revoked + +#### Option B: Username/Password Authentication + +**Username (Required for this method)**: + +- Proxmox username +- Example: `root`, `admin`, `automation` + +**Password (Required for this method)**: + +- User's password +- Stored securely (encrypted) + +**Realm (Required for this method)**: + +- Authentication realm +- Options: + - `pam` - Linux PAM authentication + - `pve` - Proxmox VE authentication +- Default: `pam` + +**Note**: Authentication tickets expire after 2 hours. Pabawi automatically refreshes them. + +### Step 4: Configure SSL Options + +**Reject Unauthorized Certificates**: + +- Toggle: On (recommended) / Off +- When **On**: Verifies SSL certificates (secure) +- When **Off**: Accepts self-signed certificates (less secure) + +**Warning**: Disabling certificate verification is insecure and should only be used for testing. 
+ +**For Self-Signed Certificates**: + +- Keep verification enabled +- Provide the CA certificate path (see Environment Variable Setup) +- Or add the certificate to your system's trust store + +### Step 5: Test Connection + +Before saving, test the connection: + +1. **Click "Test Connection"**: + - Button sends a test request to Proxmox + - Verifies credentials and connectivity + - Shows result message + +2. **Review Test Results**: + - **Success**: Green message "Connection successful" + - Proxmox version displayed + - Ready to save configuration + - **Failure**: Red message with error details + - Review error message + - Fix issues before saving + +3. **Common Test Errors**: + - "Connection refused": Check host and port + - "Authentication failed": Verify credentials + - "Certificate error": Check SSL settings + - "Timeout": Check network connectivity + +### Step 6: Save Configuration + +1. **Review All Settings**: + - Verify host and port are correct + - Confirm authentication method is configured + - Check SSL settings are appropriate + +2. **Click "Save Configuration"**: + - Button saves settings to backend + - Success message appears + - Integration becomes active + +3. **Verify Integration Status**: + - Integration status should show "Connected" + - Green indicator appears + - Ready to use + +## Environment Variable Setup + +For automated deployments or when you prefer configuration files: + +### Step 1: Edit Environment File + +1. **Locate the .env file**: + + ```bash + cd /path/to/pabawi + nano backend/.env + ``` + +2. 
**Add Proxmox Configuration**: + +### Step 2: Basic Configuration + +```bash +# Proxmox Integration +PROXMOX_ENABLED=true +PROXMOX_HOST=proxmox.example.com +PROXMOX_PORT=8006 +``` + +### Step 3: Choose Authentication Method + +**Option A: Token Authentication (Recommended)**: + +```bash +# Token format: user@realm!tokenid=uuid +PROXMOX_TOKEN=automation@pve!api-token=12345678-1234-1234-1234-123456789abc +``` + +**Option B: Username/Password Authentication**: + +```bash +PROXMOX_USERNAME=root +PROXMOX_PASSWORD=your-secure-password +PROXMOX_REALM=pam +``` + +### Step 4: SSL Configuration + +**For Production (Verified Certificates)**: + +```bash +PROXMOX_SSL_VERIFY=true +``` + +**For Self-Signed Certificates**: + +```bash +PROXMOX_SSL_VERIFY=true +PROXMOX_CA_CERT=/path/to/proxmox-ca.pem +``` + +**For Testing Only (Insecure)**: + +```bash +PROXMOX_SSL_VERIFY=false +``` + +### Step 5: Optional Advanced Settings + +```bash +# Request timeout (milliseconds) +PROXMOX_TIMEOUT=30000 + +# Client certificate authentication (optional) +PROXMOX_CLIENT_CERT=/path/to/client-cert.pem +PROXMOX_CLIENT_KEY=/path/to/client-key.pem +``` + +### Step 6: Restart Pabawi + +After editing the .env file: + +```bash +# For systemd +sudo systemctl restart pabawi + +# For Docker +docker-compose restart + +# For development +npm run dev:backend +``` + +## Authentication Options + +### Creating an API Token + +API tokens provide secure, fine-grained access control. + +#### Step 1: Access Proxmox Web Interface + +1. Open your browser +2. Navigate to `https://your-proxmox-host:8006` +3. Log in with administrator credentials + +#### Step 2: Navigate to API Tokens + +1. Click on **"Datacenter"** in the left sidebar +2. Expand **"Permissions"** +3. Click on **"API Tokens"** + +#### Step 3: Create New Token + +1. **Click "Add"** button +2. 
**Configure Token**: + - **User**: Select or create a user (e.g., `automation@pve`) + - **Token ID**: Enter a descriptive ID (e.g., `pabawi-api`) + - **Privilege Separation**: + - **Unchecked**: Token has full user permissions (recommended for Pabawi) + - **Checked**: Token has limited permissions (requires additional configuration) + - **Expire**: Set expiration date or leave empty for no expiration + - **Comment**: Optional description + +3. **Click "Add"** +4. **Copy the Token**: + - Token is displayed once + - Format: `user@realm!tokenid=uuid` + - Example: `automation@pve!pabawi-api=12345678-1234-1234-1234-123456789abc` + - **Save it securely** - you cannot retrieve it later + +#### Step 4: Configure Permissions + +If you enabled Privilege Separation, grant these permissions: + +1. **Navigate to Permissions**: + - Datacenter → Permissions → Add → API Token Permission + +2. **Grant Required Permissions**: + - Path: `/` + - API Token: Select your token + - Role: Create a custom role with: + - `VM.Allocate` + - `VM.Config.*` + - `VM.PowerMgmt` + - `VM.Audit` + - `Datastore.Allocate` + +3. **Click "Add"** + +### Using Username/Password + +If you prefer password authentication: + +#### Step 1: Create Dedicated User (Recommended) + +1. **Navigate to Users**: + - Datacenter → Permissions → Users + - Click "Add" + +2. **Configure User**: + - **User name**: `pabawi-automation` + - **Realm**: `pve` (Proxmox VE) + - **Password**: Generate a strong password + - **Email**: Optional + - **Enabled**: Checked + +3. **Click "Add"** + +#### Step 2: Grant Permissions + +1. **Navigate to Permissions**: + - Datacenter → Permissions → Add → User Permission + +2. **Configure Permissions**: + - Path: `/` + - User: `pabawi-automation@pve` + - Role: Create or select role with required permissions + +3. 
**Click "Add"** + +#### Step 3: Use in Pabawi + +Configure Pabawi with: + +- Username: `pabawi-automation` +- Password: (the password you set) +- Realm: `pve` + +## Testing the Connection + +### Via Web Interface + +1. **Navigate to Setup Page** +2. **Locate Proxmox Configuration** +3. **Click "Test Connection"** +4. **Review Results**: + - Success: Shows Proxmox version + - Failure: Shows error message + +### Via Command Line + +Test the connection manually: + +```bash +# Test with token +curl -k https://proxmox.example.com:8006/api2/json/version \ + -H "Authorization: PVEAPIToken=automation@pve!pabawi-api=your-token-uuid" + +# Test with username/password (get ticket first) +curl -k https://proxmox.example.com:8006/api2/json/access/ticket \ + -d "username=root@pam&password=your-password" +``` + +Expected response: + +```json +{ + "data": { + "version": "7.4-3", + "release": "7.4", + "repoid": "6f2f0a33" + } +} +``` + +### Verify Integration Status + +After configuration: + +1. **Check Integration Status**: + + ```bash + curl http://localhost:3000/api/integrations/status + ``` + +2. **Look for Proxmox**: + + ```json + { + "integrations": { + "proxmox": { + "enabled": true, + "connected": true, + "healthy": true, + "message": "Proxmox API is reachable" + } + } + } + ``` + +3. **Test Inventory Discovery**: + - Navigate to Inventory page + - Look for Proxmox nodes + - Verify VMs and containers appear + +## Troubleshooting + +### Connection Issues + +#### Problem: "Connection refused" + +**Symptoms**: + +``` +Error: connect ECONNREFUSED 192.168.1.100:8006 +``` + +**Solutions**: + +1. Verify Proxmox is running: + + ```bash + systemctl status pveproxy + ``` + +2. Check firewall allows port 8006: + + ```bash + # On Proxmox server + iptables -L -n | grep 8006 + ``` + +3. Test connectivity from Pabawi server: + + ```bash + telnet proxmox.example.com 8006 + nc -zv proxmox.example.com 8006 + ``` + +4. 
Verify host and port in configuration + +#### Problem: "Authentication failed" + +**Symptoms**: + +``` +Error: Authentication failed: 401 Unauthorized +Error: Authentication failed: 403 Forbidden +``` + +**Solutions**: + +1. **For Token Authentication**: + - Verify token format: `user@realm!tokenid=uuid` + - Check token hasn't expired + - Verify token exists in Proxmox + - Check token permissions + +2. **For Password Authentication**: + - Verify username is correct + - Check password is correct + - Verify realm is correct (`pam` or `pve`) + - Check user account isn't locked + +3. **Test Manually**: + + ```bash + # Test token + curl -k https://proxmox.example.com:8006/api2/json/version \ + -H "Authorization: PVEAPIToken=your-token" + + # Test password + curl -k https://proxmox.example.com:8006/api2/json/access/ticket \ + -d "username=root@pam&password=your-password" + ``` + +#### Problem: "Certificate verification failed" + +**Symptoms**: + +``` +Error: unable to verify the first certificate +Error: self signed certificate in certificate chain +``` + +**Solutions**: + +1. **Provide CA Certificate** (Recommended): + + ```bash + # Export Proxmox CA + scp root@proxmox:/etc/pve/pve-root-ca.pem ./proxmox-ca.pem + + # Configure in Pabawi + PROXMOX_CA_CERT=/path/to/proxmox-ca.pem + ``` + +2. **Disable Verification** (Testing Only): + + ```bash + PROXMOX_SSL_VERIFY=false + ``` + + **Warning**: This is insecure. Use only for testing. + +3. **Add Certificate to System Trust Store**: + + ```bash + # On Ubuntu/Debian + sudo cp proxmox-ca.pem /usr/local/share/ca-certificates/proxmox.crt + sudo update-ca-certificates + ``` + +#### Problem: "Timeout" + +**Symptoms**: + +``` +Error: Request timeout after 30000ms +``` + +**Solutions**: + +1. Check network latency: + + ```bash + ping proxmox.example.com + ``` + +2. Increase timeout: + + ```bash + PROXMOX_TIMEOUT=60000 # 60 seconds + ``` + +3. 
Check Proxmox server load: + + ```bash + ssh root@proxmox 'uptime' + ``` + +### Permission Issues + +#### Problem: "Permission denied" for operations + +**Symptoms**: + +``` +Error: Permission denied +Error: Insufficient privileges +``` + +**Solutions**: + +1. **Verify Token Permissions**: + - Log in to Proxmox web interface + - Navigate to Datacenter → Permissions + - Check token has required permissions + +2. **Grant Missing Permissions**: + - Add → API Token Permission + - Path: `/` + - Token: Your token + - Role: Administrator or custom role with required permissions + +3. **Check Privilege Separation**: + - If enabled, token needs explicit permissions + - If disabled, token inherits user permissions + +4. **Test Specific Operations**: + + ```bash + # Test VM creation permission + curl -k https://proxmox.example.com:8006/api2/json/nodes/pve/qemu \ + -H "Authorization: PVEAPIToken=your-token" \ + -X POST -d "vmid=999&name=test" + ``` + +### Configuration Issues + +#### Problem: "Integration not appearing" + +**Symptoms**: + +- Proxmox not listed in integrations +- No Proxmox nodes in inventory + +**Solutions**: + +1. Verify integration is enabled: + + ```bash + grep PROXMOX_ENABLED backend/.env + # Should show: PROXMOX_ENABLED=true + ``` + +2. Check configuration is valid: + + ```bash + # All required fields present + grep PROXMOX backend/.env + ``` + +3. Restart Pabawi: + + ```bash + sudo systemctl restart pabawi + ``` + +4. Check logs for errors: + + ```bash + sudo journalctl -u pabawi -f | grep -i proxmox + ``` + +## Security Best Practices + +### Authentication + +1. **Use API Tokens**: + - More secure than passwords + - Easier to rotate and revoke + - Fine-grained permissions + +2. **Dedicated User Account**: + - Create a separate user for Pabawi + - Don't use root account + - Limit permissions to what's needed + +3. 
**Strong Passwords**: + - Use password generator + - Minimum 16 characters + - Mix of letters, numbers, symbols + - Store in password manager + +4. **Regular Rotation**: + - Rotate tokens every 90 days + - Change passwords regularly + - Revoke unused tokens + +### Network Security + +1. **Use HTTPS**: + - Always use encrypted connections + - Never disable SSL in production + - Verify certificates + +2. **Firewall Rules**: + - Restrict access to Proxmox API + - Allow only Pabawi server IP + - Block public access + +3. **Network Segmentation**: + - Place Proxmox in management network + - Separate from production networks + - Use VPN for remote access + +### Access Control + +1. **Least Privilege**: + - Grant minimum required permissions + - Review permissions regularly + - Remove unused permissions + +2. **Audit Logging**: + - Enable Proxmox audit logging + - Monitor API access + - Review logs regularly + +3. **Multi-Factor Authentication**: + - Enable MFA for Proxmox web interface + - Use MFA for Pabawi access + - Protect administrator accounts + +### Configuration Security + +1. **Secure Storage**: + - Protect .env file permissions: + + ```bash + chmod 600 backend/.env + ``` + + - Don't commit secrets to git + - Use secrets management tools + +2. **Environment Variables**: + - Use environment variables for secrets + - Don't hardcode credentials + - Rotate secrets regularly + +3. 
**Backup Configuration**: + - Backup configuration securely + - Encrypt backups + - Test restore procedures + +## Related Documentation + +- [Proxmox Integration](integrations/proxmox.md) - Detailed integration documentation +- [Provisioning Guide](provisioning-guide.md) - How to create VMs and containers +- [Permissions and RBAC](permissions-rbac.md) - Permission requirements +- [Troubleshooting Guide](troubleshooting.md) - General troubleshooting + +## Support + +For additional help: + +- **Pabawi Documentation**: [pabawi.dev/docs](https://pabawi.dev/docs) +- **GitHub Issues**: [pabawi/issues](https://github.com/pabawi/pabawi/issues) +- **Proxmox Documentation**: [pve.proxmox.com/wiki](https://pve.proxmox.com/wiki) +- **Proxmox Forum**: [forum.proxmox.com](https://forum.proxmox.com) diff --git a/docs/security-assessment-v0.10.0.md b/docs/security-assessment-v0.10.0.md new file mode 100644 index 00000000..560d97c5 --- /dev/null +++ b/docs/security-assessment-v0.10.0.md @@ -0,0 +1,321 @@ +# Pabawi v0.10.0 — Security Assessment Report + +**Date:** 2026-03-22 +**Scope:** Full codebase review — backend (`backend/src/`) and frontend (`frontend/src/`) +**Method:** Manual static analysis + +--- + +## Executive Summary + +No critical vulnerabilities were found. The codebase applies several security best practices correctly: parameterized SQL queries, `spawn()` without `shell: true`, helmet headers, rate limiting, input sanitization middleware, and bcrypt for passwords. + +The most actionable findings are concentrated in **Ansible argument injection**, **JWT token storage in localStorage**, **CORS permissiveness in production**, and **command whitelist bypass via whitespace**. 
+
+| Severity | Count |
+|-----------|-------|
+| Critical | 0 |
+| High | 1 |
+| Medium | 9 |
+| Low | 8 |
+| **Total** | **18** |
+
+---
+
+## Findings
+
+### HIGH
+
+---
+
+#### H-01 — Ansible Module Argument Injection
+
+**File:** `backend/src/integrations/ansible/AnsibleService.ts`
+**Method:** `toModuleArgString()`
+
+The method escapes backslashes and double quotes, but does not handle newlines (`\n`), null bytes, or Ansible-specific special syntax in parameter values. A crafted value could inject additional `-m` or `-a` flags when the resulting string is passed to Ansible.
+
+```typescript
+private toModuleArgString(args: Record<string, unknown>): string {
+  return Object.entries(args)
+    .map(([key, value]) => {
+      const strValue = String(value).replace(/\\/g, "\\\\").replace(/"/g, '\\"');
+      return strValue.includes(" ") ? `${key}="${strValue}"` : `${key}=${strValue}`;
+    })
+    .join(" ");
+}
+```
+
+**Fix:** Strip or reject newlines and control characters from values before quoting. Additionally validate keys against an allowlist of expected parameter names for each module.
+
+---
+
+### MEDIUM
+
+---
+
+#### M-01 — JWT Token Stored in `localStorage`
+
+**File:** `frontend/src/lib/auth.svelte.ts`
+
+Access token, refresh token, and user object are all written to `localStorage`. Any XSS (even minor, e.g. via a dependency) can exfiltrate these without any further interaction.
+
+```typescript
+localStorage.setItem(TOKEN_KEY, data.token);
+localStorage.setItem(REFRESH_TOKEN_KEY, data.refreshToken);
+localStorage.setItem(USER_KEY, JSON.stringify(data.user));
+```
+
+**Fix:** Prefer `httpOnly` secure cookies for tokens. If cookies are not viable, store only a short-lived access token in memory (`$state`) and use a `httpOnly` cookie exclusively for the refresh token.
+ +--- + +#### M-02 — CORS Defaults Include `localhost` in Production + +**File:** `backend/src/config/schema.ts` + +```typescript +corsAllowedOrigins: z.array(z.string()).default([ + "http://localhost:5173", + "http://localhost:3000", +]), +``` + +If `CORS_ALLOWED_ORIGINS` is not set in the production environment, the server silently accepts requests from localhost origins. This is unlikely to be exploited remotely but represents an accidental misconfiguration surface. + +**Fix:** In `server.ts` startup validation, if `NODE_ENV === 'production'` and the CORS origin list still contains `localhost`, throw and abort startup. + +--- + +#### M-03 — Ansible Node ID Not Validated + +**File:** `backend/src/integrations/ansible/AnsibleService.ts` + +`nodeId` is passed directly as an argument to `spawn()`. Although `shell: false` prevents shell interpretation, Ansible parses the value itself and a crafted node ID (`-e @/etc/passwd`) could inject Ansible CLI flags. + +```typescript +const args = [ + nodeId, // user-supplied, no validation + "-i", this.inventoryPath, + "-m", "shell", + "-a", command, +]; +``` + +**Fix:** Validate `nodeId` against `^[a-zA-Z0-9._-]+$` before constructing the argument list. + +--- + +#### M-04 — SSE Auth Token Exposed via Query Parameter + +**File:** `backend/src/routes/streaming.ts` + +The SSE endpoint accepts `?token=` as a fallback because the `EventSource` API cannot set headers. Query parameters are captured in access logs, proxy logs, and browser history. + +```typescript +if (typeof req.query.token === "string" && !req.headers.authorization) { + req.headers.authorization = `Bearer ${req.query.token}`; + delete (req.query as Record).token; +} +``` + +**Fix:** Ensure the access logger redacts the `token` query parameter. Consider migrating SSE streams to WebSocket (which supports custom headers) for long-term improvement. 
+ +--- + +#### M-05 — Rate Limit Window Allows Burst Before Account Lockout + +**File:** `backend/src/middleware/securityMiddleware.ts` + +The IP-level auth rate limiter allows 10 attempts per 15 minutes. The account lockout fires after 5 failed attempts within 15 minutes. Between the two systems there is a window where an IP can fail 10 times (across multiple accounts) before the IP is rate-limited, while each individual account locks at 5. + +**Fix:** Reduce the IP rate limit window to 5 attempts per 15 minutes to match the per-account lockout threshold. Consider adding exponential back-off. + +--- + +#### M-06 — Loose Content Security Policy (`unsafe-inline` for Styles) + +**File:** `backend/src/middleware/securityMiddleware.ts` + +```typescript +styleSrc: ["'self'", "'unsafe-inline'"], +``` + +`unsafe-inline` for styles allows CSS injection attacks and weakens XSS mitigation. While style-only injection is lower severity than script injection, it enables clickjacking via CSS overlay and data exfiltration via CSS selectors. + +**Fix:** Replace with CSP nonces or `style-src-attr` + `style-src-elem` with hashes. Svelte's scoped styles do not require `unsafe-inline` in production builds. + +--- + +#### M-07 — Path Traversal Risk in Temporary Inventory Files + +**File:** `backend/src/integrations/ansible/AnsibleService.ts` + +Temporary inventory files are constructed with filenames derived from user-supplied `nodeId` values. A crafted `nodeId` containing `../` sequences could cause writes outside the intended temp directory. + +**Fix:** Always derive temp file paths exclusively from `os.tmpdir()` + `crypto.randomUUID()`, never from user input. + +--- + +#### M-08 — SSH Credentials Held in Plain-Text Process Memory + +**File:** `backend/src/integrations/ssh/SSHService.ts` + +SSH passwords and sudo passwords are stored as plain strings in memory throughout the connection lifecycle and are accessible via heap dumps or memory inspection. 
+
+```typescript
+if (host.password) {
+  connectConfig.password = host.password;
+}
+```
+
+**Fix:** Retrieve credentials from an external secret store (e.g. HashiCorp Vault) at connection time and avoid retaining them in long-lived objects. At minimum, overwrite the string reference after use.
+
+---
+
+#### M-09 — No CSRF Mitigation for Non-JWT Paths
+
+**File:** `backend/src/middleware/securityMiddleware.ts`
+
+The application uses JWT in the `Authorization` header, which is immune to standard CSRF. However, the SSE token-in-query-parameter mechanism (see M-04) means that if tokens end up in cookies or a future auth path changes, CSRF protection will be absent.
+
+**Fix:** Add `SameSite=Strict` or `SameSite=Lax` to all cookies now. Validate `Origin` header on all state-changing requests as a defence-in-depth measure.
+
+---
+
+### LOW
+
+---
+
+#### L-01 — Command Whitelist Bypass via Internal Whitespace
+
+**File:** `backend/src/services/CommandWhitelistService.ts` (or `backend/src/validation/CommandWhitelistService.ts`)
+
+The whitelist check normalizes leading/trailing whitespace with `.trim()` but not internal whitespace. A command like `ps  -adef` (note the double space) would not match a whitelist entry `ps -adef` under prefix-match mode.
+
+```typescript
+const trimmedCommand = command.trim();
+return this.config.whitelist.some((allowed) =>
+  trimmedCommand === allowed || trimmedCommand.startsWith(allowed + " "),
+);
+```
+
+**Fix:** Normalize internal whitespace: `command.trim().replace(/\s+/g, ' ')`.
+
+---
+
+#### L-02 — Plain-Text Username in Auth Failure Logs
+
+**File:** `backend/src/services/AuthenticationService.ts`
+
+```typescript
+console.warn(`[AUTH FAILURE] ${timestamp} - Username: ${username} - Reason: ${reason}`);
+```
+
+Logging plaintext usernames in failure messages leaks valid account names if logs are exposed.
+
+**Fix:** Log a truncated or hashed username (`SHA-256(username).slice(0,8)`) instead of the raw value.
+ +--- + +#### L-03 — `console.error()` Used Instead of `LoggerService` + +**File:** `backend/src/services/AuthenticationService.ts` (multiple catch blocks) + +Direct `console.error()` calls bypass `LoggerService`, losing structured metadata and making log aggregation inconsistent. + +**Fix:** Replace all `console.*` calls with `this.logger.*` equivalents. + +--- + +#### L-04 — No Refresh Token Rotation + +**File:** `frontend/src/lib/auth.svelte.ts` + +Refresh tokens are long-lived and not rotated on each use. A stolen refresh token can be used repeatedly without detection. + +**Fix:** Issue a new refresh token on each access token refresh and invalidate the old one (refresh token rotation). Detect reuse of invalidated refresh tokens as a compromise signal. + +--- + +#### L-05 — Ephemeral JWT Secret in Development Not Flagged Loudly + +**File:** `backend/src/services/AuthenticationService.ts` + +When `JWT_SECRET` is absent in non-production, an ephemeral secret is generated silently (beyond a log warning). Sessions are invalidated on every restart with no user-visible error. + +**Fix:** Print a prominent startup banner (not just a log line) and refuse to start if `JWT_SECRET` is absent even in development, or document the ephemeral behaviour explicitly in `README`. + +--- + +#### L-06 — SSH Private Key Path Exposed in Logs + +**File:** `backend/src/integrations/ssh/SSHService.ts` and config + +The full filesystem path to the SSH private key is logged during configuration initialisation. If logs are forwarded to an external system, this reveals key locations to potential attackers. + +**Fix:** Log only the filename, not the full path. Obfuscate or omit key paths from structured log metadata. + +--- + +#### L-07 — No HSTS Header Verified in Production + +**File:** `backend/src/middleware/securityMiddleware.ts` + +Helmet is configured, but there is no startup assertion that `Strict-Transport-Security` is active. 
In HTTP-only deployments, HSTS has no effect, but the absence of an explicit check means a misconfigured TLS termination proxy could silently serve HTTP. + +**Fix:** Add a startup warning or assertion if `NODE_ENV === 'production'` and `HTTPS` is not confirmed (e.g. via `X-Forwarded-Proto` detection). + +--- + +#### L-08 — Token Revocation Records Never Pruned + +**File:** `backend/src/services/AuthenticationService.ts` / `backend/src/database/` + +Revoked tokens are stored with an `expiresAt` timestamp but there is no scheduled cleanup job to remove expired revocation records. Over time this table will grow unboundedly. + +**Fix:** Add a periodic cleanup query (`DELETE FROM revoked_tokens WHERE expires_at < NOW()`) as a scheduled job or a lazy cleanup triggered on each revocation check. + +--- + +## What Is Done Well + +| Area | Status | +|------|--------| +| SQL queries — all parameterized | ✓ | +| Child process spawning — `shell: false` throughout | ✓ | +| Passwords — bcrypt with default cost factor | ✓ | +| Account lockout — implemented with progressive delay | ✓ | +| Helmet security headers — enabled | ✓ | +| Input sanitization middleware — query, body, params | ✓ | +| Rate limiting — global + auth-specific | ✓ | +| Token revocation — database-backed | ✓ | +| Foreign key enforcement in SQLite | ✓ | +| No `innerHTML` / `{@html}` detected in frontend | ✓ | +| Command execution timeout with SIGTERM→SIGKILL | ✓ | + +--- + +## Priority Remediation Order + +| Priority | ID | Finding | +|----------|----|---------| +| 1 | H-01 | Ansible module argument injection | +| 2 | M-01 | JWT in localStorage | +| 3 | M-03 | Ansible node ID not validated | +| 4 | M-02 | CORS localhost default in production | +| 5 | L-01 | Command whitelist whitespace bypass | +| 6 | M-04 | SSE token in query string logged | +| 7 | M-06 | CSP `unsafe-inline` for styles | +| 8 | M-05 | Auth rate limit vs lockout gap | +| 9 | M-07 | Path traversal in temp inventory | +| 10 | M-08 | SSH credentials 
in plain-text memory | + +--- + +## Recommendations + +- Run `npm audit` in CI on every PR and block merges on high/critical advisories. +- Add SAST tooling (e.g. Semgrep with the `nodejs` and `typescript` rulesets) to the CI pipeline. +- Introduce a secrets scanning step (e.g. `truffleHog` or `gitleaks`) to prevent accidental credential commits. +- Schedule a follow-up review after Azure support (`.kiro/specs/azure-support/`) is implemented, as cloud credential handling introduces new attack surface. diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 4e08cdf9..3d28bea4 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -2788,6 +2788,674 @@ If you can't find a solution in this guide: - Include Pabawi and Bolt versions - Describe steps to reproduce +## Proxmox Provisioning Issues + +### Problem: "VMID already exists" + +**Symptoms:** + +```json +{ + "error": { + "code": "PROXMOX_VMID_EXISTS", + "message": "VM with VMID 100 already exists on node pve1" + } +} +``` + +**Causes:** + +- VMID is already in use by another VM or container +- Previous VM with same ID wasn't fully deleted +- VMID conflict across nodes + +**Solutions:** + +1. **Choose a different VMID:** + - Use a unique ID between 100 and 999999999 + - Check existing VMs in inventory + - Follow your organization's VMID allocation scheme + +2. **Verify existing VM:** + + ```bash + # Via Proxmox CLI + qm list | grep 100 + pct list | grep 100 + + # Via Pabawi + # Navigate to Inventory and search for VMID + ``` + +3. **Delete existing VM if appropriate:** + - Navigate to the existing VM in inventory + - Use Manage tab → Destroy + - Confirm deletion + - Wait for deletion to complete + +4. 
**Check across all nodes:** + - VMIDs must be unique across the entire cluster + - Check all nodes for conflicts + +### Problem: "Insufficient resources" + +**Symptoms:** + +```json +{ + "error": { + "code": "PROXMOX_INSUFFICIENT_RESOURCES", + "message": "Not enough memory available on node pve1" + } +} +``` + +**Causes:** + +- Target node doesn't have enough CPU, memory, or storage +- Resources allocated but not yet freed +- Storage pool is full + +**Solutions:** + +1. **Check available resources:** + + ```bash + # Via Proxmox CLI + pvesh get /nodes/pve1/status + + # Check storage + pvesh get /nodes/pve1/storage/local-lvm/status + ``` + +2. **Choose a different node:** + - Select a node with more available resources + - Check resource availability in Proxmox web interface + - Balance load across cluster nodes + +3. **Reduce resource allocation:** + - Decrease CPU cores + - Reduce memory allocation + - Use smaller disk size + - Example: Change from 8GB to 4GB RAM + +4. **Free up resources:** + - Stop unused VMs + - Delete temporary VMs + - Clean up old snapshots + - Expand storage if needed + +5. **Check storage space:** + + ```bash + # Check disk usage + df -h + + # Check LVM space + lvs + vgs + ``` + +### Problem: "Template not found" + +**Symptoms:** + +```json +{ + "error": { + "code": "PROXMOX_TEMPLATE_NOT_FOUND", + "message": "Template 'local:vztmpl/ubuntu-22.04.tar.zst' not found" + } +} +``` + +**Causes:** + +- Template doesn't exist on target node +- Wrong template name or path +- Template not downloaded yet +- Storage location incorrect + +**Solutions:** + +1. **List available templates:** + + ```bash + # Via Proxmox CLI + pveam available + pveam list local + ``` + +2. **Download template:** + + ```bash + # Via Proxmox CLI + pveam download local ubuntu-22.04-standard_22.04-1_amd64.tar.zst + + # Or via Proxmox web interface: + # Node → local → CT Templates → Templates → Download + ``` + +3. 
**Verify template path:** + - Format: `storage:vztmpl/template-name.tar.zst` + - Example: `local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst` + - Check exact filename including version numbers + +4. **Use a different template:** + - Select from available templates + - Use a template that exists on the target node + +5. **Check storage configuration:** + + ```bash + # Verify storage is configured for templates + pvesm status + ``` + +### Problem: "Invalid hostname format" + +**Symptoms:** + +```json +{ + "error": { + "code": "VALIDATION_ERROR", + "message": "Hostname must contain only lowercase letters, numbers, and hyphens" + } +} +``` + +**Causes:** + +- Hostname contains invalid characters +- Uppercase letters used +- Starts or ends with hyphen +- Contains underscores or spaces + +**Solutions:** + +1. **Use valid hostname format:** + - Only lowercase letters (a-z) + - Numbers (0-9) + - Hyphens (-) but not at start or end + - No underscores, spaces, or special characters + +2. **Valid examples:** + - ✓ `web-server-01` + - ✓ `app-prod` + - ✓ `db-staging-02` + - ✗ `Web_Server_01` (uppercase, underscore) + - ✗ `app server` (space) + - ✗ `-web-01` (starts with hyphen) + +3. **Convert invalid hostnames:** + - Replace underscores with hyphens + - Convert to lowercase + - Remove spaces + - Remove leading/trailing hyphens + +### Problem: "Network configuration error" + +**Symptoms:** + +```json +{ + "error": { + "code": "PROXMOX_NETWORK_ERROR", + "message": "Invalid network configuration" + } +} +``` + +**Causes:** + +- Bridge doesn't exist on target node +- Invalid network configuration syntax +- IP address format incorrect +- Gateway not specified for static IP + +**Solutions:** + +1. **Verify bridge exists:** + + ```bash + # List available bridges + ip link show | grep vmbr + + # Or via Proxmox web interface: + # Node → System → Network + ``` + +2. 
**Use correct network format:** + + **For VMs:** + + ``` + model=virtio,bridge=vmbr0 + model=virtio,bridge=vmbr0,firewall=1 + model=e1000,bridge=vmbr1 + ``` + + **For LXC:** + + ``` + name=eth0,bridge=vmbr0,ip=dhcp + name=eth0,bridge=vmbr0,ip=192.168.1.100/24,gw=192.168.1.1 + ``` + +3. **Check IP address format:** + - Use CIDR notation: `192.168.1.100/24` + - Include gateway for static IPs: `gw=192.168.1.1` + - Or use DHCP: `ip=dhcp` + +4. **Common network configurations:** + + ``` + # DHCP (automatic) + name=eth0,bridge=vmbr0,ip=dhcp + + # Static IP + name=eth0,bridge=vmbr0,ip=192.168.1.50/24,gw=192.168.1.1 + + # Multiple interfaces + name=eth0,bridge=vmbr0,ip=dhcp + name=eth1,bridge=vmbr1,ip=10.0.0.50/24 + ``` + +### Problem: "Permission denied for provisioning" + +**Symptoms:** + +```json +{ + "error": { + "code": "PERMISSION_DENIED", + "message": "User does not have permission to create VMs" + } +} +``` + +**Causes:** + +- User lacks provisioning permissions +- Proxmox user/token doesn't have required permissions +- Integration not configured with proper credentials + +**Solutions:** + +1. **Check Pabawi permissions:** + - Verify your user has `provision:create_vm` or `provision:create_lxc` permission + - Contact administrator to grant permissions + - See [Permissions and RBAC Guide](permissions-rbac.md) + +2. **Check Proxmox permissions:** + + ```bash + # Via Proxmox CLI + pveum user permissions @ + + # Required permissions: + # - VM.Allocate + # - VM.Config.* + # - Datastore.Allocate + ``` + +3. **Grant Proxmox permissions:** + - Log in to Proxmox web interface + - Navigate to Datacenter → Permissions + - Add permissions for the API user/token + - See [Proxmox Setup Guide](proxmox-setup-guide.md) + +4. 
**Verify API token permissions:** + - Check token has privilege separation disabled + - Or grant explicit permissions to token + - Test token with curl: + + ```bash + curl -k https://proxmox:8006/api2/json/nodes \ + -H "Authorization: PVEAPIToken=user@realm!token=uuid" + ``` + +### Problem: "Provisioning operation timeout" + +**Symptoms:** + +```json +{ + "error": { + "code": "OPERATION_TIMEOUT", + "message": "Provisioning operation timed out after 300s" + } +} +``` + +**Causes:** + +- VM/container creation takes longer than timeout +- Slow storage (network storage, spinning disks) +- Target node is overloaded +- Large disk allocation + +**Solutions:** + +1. **Increase timeout:** + + ```bash + # In backend/.env + PROXMOX_TIMEOUT=600000 # 10 minutes + ``` + +2. **Check target node load:** + + ```bash + # Via Proxmox CLI + uptime + top + iostat + ``` + +3. **Use faster storage:** + - Prefer local SSD over network storage + - Use thin provisioning + - Reduce initial disk size + +4. **Reduce resource allocation:** + - Smaller disk size provisions faster + - Fewer CPU cores + - Less memory + +5. **Try again:** + - Node may have been temporarily busy + - Wait a few minutes and retry + - Choose a different node + +### Problem: "Storage not available" + +**Symptoms:** + +```json +{ + "error": { + "code": "PROXMOX_STORAGE_ERROR", + "message": "Storage 'local-lvm' is not available" + } +} +``` + +**Causes:** + +- Storage doesn't exist on target node +- Storage is disabled +- Storage is full +- Wrong storage name + +**Solutions:** + +1. **List available storage:** + + ```bash + # Via Proxmox CLI + pvesm status + + # Or via Proxmox web interface: + # Datacenter → Storage + ``` + +2. **Verify storage is enabled:** + + ```bash + # Check storage configuration + cat /etc/pve/storage.cfg + ``` + +3. **Check storage space:** + + ```bash + # Check available space + pvesm status | grep local-lvm + df -h + ``` + +4. 
**Use different storage:** + - Select storage that exists on target node + - Common storage names: `local`, `local-lvm`, `ceph-pool` + - Check storage type supports VMs/containers + +5. **Enable storage:** + - Proxmox web interface: Datacenter → Storage + - Edit storage and enable it + - Ensure storage is available on target node + +### Problem: "Provision menu not visible" + +**Symptoms:** + +- Provision menu item missing from navigation +- Cannot access provisioning page +- No way to create VMs/containers + +**Causes:** + +- User lacks provisioning permissions +- No provisioning integrations configured +- Integration not connected + +**Solutions:** + +1. **Check permissions:** + - Verify you have any `provision:*` permission + - Contact administrator for access + - See [Permissions and RBAC Guide](permissions-rbac.md) + +2. **Verify integration is configured:** + + ```bash + # Check integration status + curl http://localhost:3000/api/integrations/status + ``` + +3. **Check Proxmox integration:** + - Navigate to Setup page + - Verify Proxmox integration is configured + - Test connection + - See [Proxmox Setup Guide](proxmox-setup-guide.md) + +4. **Check integration health:** + - Integration must be "connected" and "healthy" + - Green status indicator + - No error messages + +5. **Refresh page:** + - Clear browser cache + - Log out and log back in + - Try different browser + +### Problem: "Form validation errors" + +**Symptoms:** + +- Cannot submit provisioning form +- Red error messages below fields +- Submit button disabled + +**Causes:** + +- Required fields empty +- Invalid field values +- Values outside acceptable ranges + +**Solutions:** + +1. **Check required fields:** + - VMID (required, 100-999999999) + - Name/Hostname (required, valid format) + - Node (required, must exist) + - OS Template (required for LXC) + +2. 
**Verify field formats:** + - VMID: Positive integer + - Hostname: Lowercase, alphanumeric, hyphens + - Memory: Minimum 512 MB + - Cores: Minimum 1 + +3. **Check value ranges:** + - VMID: 100 to 999999999 + - Memory: At least 512 MB + - Cores: At least 1 + - Port: 1 to 65535 + +4. **Review error messages:** + - Read validation messages carefully + - Fix indicated issues + - Submit button enables when all valid + +### Problem: "VM starts but network doesn't work" + +**Symptoms:** + +- VM created successfully +- VM is running +- No network connectivity +- Cannot ping or SSH to VM + +**Causes:** + +- Wrong network configuration +- Bridge not connected +- Firewall blocking traffic +- Guest OS network not configured + +**Solutions:** + +1. **Verify network configuration:** + - Check VM network settings in Proxmox + - Verify bridge is correct + - Check cable is "connected" + +2. **Check bridge configuration:** + + ```bash + # On Proxmox node + ip link show vmbr0 + brctl show vmbr0 + ``` + +3. **Check guest OS network:** + - Access VM console in Proxmox + - Check network interface is up: `ip addr` + - Check DHCP client is running + - Configure static IP if needed + +4. **Check firewall:** + - Proxmox firewall settings + - Guest OS firewall + - Network firewall rules + +5. **Verify DHCP:** + - If using DHCP, check DHCP server is running + - Check DHCP leases + - Try static IP instead + +### Problem: "Cannot destroy VM" + +**Symptoms:** + +```json +{ + "error": { + "code": "PROXMOX_DESTROY_ERROR", + "message": "Cannot destroy VM: VM is locked" + } +} +``` + +**Causes:** + +- VM is locked by another operation +- VM has active snapshots +- VM is in use +- Backup is running + +**Solutions:** + +1. **Wait for operations to complete:** + - Check Proxmox task log + - Wait for running tasks to finish + - Try again after a few minutes + +2. **Stop VM first:** + - Use Manage tab → Stop + - Wait for VM to stop completely + - Then try destroy again + +3. 
**Check for locks:**

   ```bash
   # Via Proxmox CLI
   qm unlock <vmid>
   ```

4. **Remove snapshots:**
   - Delete VM snapshots first
   - Via Proxmox web interface
   - Then try destroy again

5. **Force unlock (careful!):**

   ```bash
   # Via Proxmox CLI (use with caution)
   qm unlock <vmid>
   qm destroy <vmid>
   ```

### Problem: "LXC container won't start"

**Symptoms:**

- Container created successfully
- Start action fails
- Error in Proxmox logs

**Causes:**

- Template incompatibility
- Missing kernel features
- Resource constraints
- Configuration error

**Solutions:**

1. **Check Proxmox logs:**

   ```bash
   # On Proxmox node
   journalctl -u pve-container@<ctid>
   cat /var/log/pve/tasks/*
   ```

2. **Verify template:**
   - Use official Proxmox templates
   - Check template is compatible with Proxmox version
   - Try different template

3. **Check kernel features:**

   ```bash
   # Verify required kernel modules
   lsmod | grep overlay
   lsmod | grep nf_nat
   ```

4. **Reduce resources:**
   - Try with less memory
   - Reduce CPU cores
   - Use smaller disk

5. 
**Check configuration:** + - Review container configuration in Proxmox + - Check for invalid settings + - Compare with working container + ## Additional Resources - [Bolt Documentation](https://puppet.com/docs/bolt/) @@ -2795,3 +3463,8 @@ If you can't find a solution in this guide: - [Pabawi GitHub Repository](https://github.com/example42/pabawi) - [Pabawi API Documentation](./api.md) - [Pabawi Configuration Guide](./configuration.md) +- [Provisioning Guide](provisioning-guide.md) +- [Proxmox Setup Guide](proxmox-setup-guide.md) +- [Manage Tab Guide](manage-tab-guide.md) +- [Permissions and RBAC Guide](permissions-rbac.md) +- [Proxmox Documentation](https://pve.proxmox.com/wiki) diff --git a/frontend/package.json b/frontend/package.json index 02cc9bd3..6951dd4d 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,6 +1,6 @@ { "name": "frontend", - "version": "0.8.0", + "version": "0.10.0", "description": "Pabawi frontend web interface", "type": "module", "scripts": { @@ -17,9 +17,11 @@ }, "devDependencies": { "@sveltejs/vite-plugin-svelte": "^6.2.1", + "@testing-library/jest-dom": "^6.9.1", "@testing-library/svelte": "^5.0.0", "@tsconfig/svelte": "^5.0.4", "autoprefixer": "^10.4.19", + "fast-check": "^4.6.0", "jsdom": "^27.4.0", "postcss": "^8.4.38", "tailwindcss": "^3.4.3", diff --git a/frontend/src/App.svelte b/frontend/src/App.svelte index b00b3e5d..73dff213 100644 --- a/frontend/src/App.svelte +++ b/frontend/src/App.svelte @@ -16,6 +16,8 @@ import GroupManagementPage from './pages/GroupManagementPage.svelte'; import GroupDetailPage from './pages/GroupDetailPage.svelte'; import RoleManagementPage from './pages/RoleManagementPage.svelte'; + import ProvisionPage from './pages/ProvisionPage.svelte'; + import IntegrationConfigPage from './pages/IntegrationConfigPage.svelte'; import { router } from './lib/router.svelte'; import type { RouteConfig } from './lib/router.svelte'; import { get } from './lib/api'; @@ -28,13 +30,15 @@ '/setup': SetupPage, 
'/inventory': { component: InventoryPage, requiresAuth: true }, '/executions': { component: ExecutionsPage, requiresAuth: true }, + '/provision': { component: ProvisionPage, requiresAuth: true }, '/puppet': { component: PuppetPage, requiresAuth: true }, '/users': { component: UserManagementPage, requiresAuth: true, requiresAdmin: true }, '/groups': { component: GroupManagementPage, requiresAuth: true, requiresAdmin: true }, '/groups/:id': { component: GroupDetailPage, requiresAuth: true }, '/roles': { component: RoleManagementPage, requiresAuth: true, requiresAdmin: true }, '/nodes/:id': { component: NodeDetailPage, requiresAuth: true }, - '/integrations/:integration/setup': { component: IntegrationSetupPage, requiresAuth: true } + '/integrations/:integration/setup': { component: IntegrationSetupPage, requiresAuth: true }, + '/integrations/config': { component: IntegrationConfigPage, requiresAuth: true } }; let setupComplete = $state(true); // Default to true to avoid flashing @@ -96,7 +100,7 @@ {#if setupComplete}