diff --git a/.dockerignore b/.dockerignore index ffa6d51..54326d7 100644 --- a/.dockerignore +++ b/.dockerignore @@ -10,10 +10,6 @@ __pycache__/ # Local build artifacts /afs -/afs-server -/redis-qmd -/module/*.so -/module/*.xo /mount/agent-filesystem-mount /mount/agent-filesystem-nfs /sandbox/sandbox @@ -33,6 +29,5 @@ __pycache__/ afs.config.json afs.databases.json -# Retired root note/artifact folders +# Retired root note folder; plans/ stays in the build context. /tasks -/plans diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..fe7a9f5 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,99 @@ +name: CI + +on: + pull_request: + push: + branches: + - main + workflow_dispatch: + +permissions: + contents: read + +jobs: + go-root: + name: Go (root) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-go@v6 + with: + go-version: "1.22" + cache-dependency-path: go.sum + - name: Vet + run: go vet ./cmd/... ./internal/... ./deploy/... + - name: Test + run: go test ./cmd/... ./internal/... ./deploy/... + + go-mount: + name: Go (mount) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + # Mount client tests fork a real redis-server per test (see + # mount/internal/client/native_test.go); install the binary so the + # tests can spawn it. + - name: Install redis-server + run: sudo apt-get update && sudo apt-get install -y redis-server + - uses: actions/setup-go@v6 + with: + go-version: "1.22" + cache-dependency-path: mount/go.sum + - name: Vet + working-directory: mount + run: go vet ./... + - name: Test + working-directory: mount + run: go test ./... + + go-sandbox: + name: Go (sandbox) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-go@v6 + with: + go-version: "1.22" + cache-dependency-path: sandbox/go.sum + - name: Vet + working-directory: sandbox + run: go vet ./... + - name: Test + working-directory: sandbox + run: go test ./... 
+ + ui: + name: UI + runs-on: ubuntu-latest + defaults: + run: + working-directory: ui + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-node@v6 + with: + node-version: "24" + cache: npm + cache-dependency-path: ui/package-lock.json + - run: npm ci + - name: Build + run: npm run build + - name: Test + run: npm test + + ui-lint: + name: UI lint + runs-on: ubuntu-latest + defaults: + run: + working-directory: ui + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-node@v6 + with: + node-version: "24" + cache: npm + cache-dependency-path: ui/package-lock.json + - run: npm ci + - name: Lint + run: npm run lint diff --git a/.gitignore b/.gitignore index b14b28f..957c336 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ *.dSYM/ /afs /afs-control-plane -/afs-server mount/agent-filesystem-mount mount/agent-filesystem-nfs sandbox/sandbox @@ -11,7 +10,6 @@ afs.config.json afs.config.json.backup afs.databases.json afs.catalog.sqlite* -module/ ui/.tanstack/ .claude/ diff --git a/Makefile b/Makefile index 06bb3b0..fb76b5d 100644 --- a/Makefile +++ b/Makefile @@ -88,7 +88,7 @@ uninstall: ## Remove the installed afs symlink from $(BINDIR). clean: ## Remove compiled artifacts. $(MAKE) -C mount clean - $(RM) afs afs-control-plane afs-server + $(RM) afs afs-control-plane test: ## Run Go unit tests for the active product surfaces. go test ./cmd/... ./deploy/... ./internal/... 
diff --git a/cmd/afs/afs_mcp.go b/cmd/afs/afs_mcp.go index b502d2f..306bb9a 100644 --- a/cmd/afs/afs_mcp.go +++ b/cmd/afs/afs_mcp.go @@ -3,9 +3,6 @@ package main import ( "bufio" "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" "errors" "fmt" "io" @@ -17,6 +14,7 @@ import ( "github.com/redis/agent-filesystem/internal/controlplane" "github.com/redis/agent-filesystem/internal/mcpproto" + "github.com/redis/agent-filesystem/internal/mcptools" "github.com/redis/agent-filesystem/mount/client" "github.com/redis/go-redis/v9" ) @@ -31,50 +29,41 @@ type afsMCPServer struct { workspaceLocked string } -type mcpFileListItem struct { - Path string `json:"path"` - Name string `json:"name"` - Kind string `json:"kind"` - Size int64 `json:"size"` - ModifiedAt string `json:"modified_at,omitempty"` - Target string `json:"target,omitempty"` -} - -type mcpGrepMatch struct { - Path string `json:"path"` - Line int64 `json:"line,omitempty"` - Text string `json:"text"` - Binary bool `json:"binary,omitempty"` -} - -type mcpGrepCount struct { - Path string `json:"path"` - Count int `json:"count"` -} - -type mcpFilePatchOp struct { - Op string `json:"op"` - StartLine *int `json:"start_line,omitempty"` - EndLine *int `json:"end_line,omitempty"` - Old string `json:"old,omitempty"` - New string `json:"new,omitempty"` - ContextBefore string `json:"context_before,omitempty"` - ContextAfter string `json:"context_after,omitempty"` -} - -type mcpFilePatchInput struct { - Workspace string `json:"workspace,omitempty"` - Path string `json:"path"` - ExpectedSHA256 string `json:"expected_sha256,omitempty"` - Patches []mcpFilePatchOp `json:"patches"` -} +// Aliases for shared MCP helpers and types. The canonical definitions +// live in internal/mcptools — see that package for behavior. Local names +// are retained so existing call sites in this file keep working. 
+type ( + mcpFileListItem = mcptools.FileListItem + mcpGrepMatch = mcptools.GrepMatch + mcpGrepCount = mcptools.GrepCount + mcpFilePatchOp = mcptools.FilePatchOp + mcpFilePatchInput = mcptools.FilePatchInput + mcpTextMatch = mcptools.TextMatch +) -type mcpTextMatch struct { - Start int - End int - StartLine int - EndLine int -} +var ( + mcpRequiredString = mcptools.RequiredString + mcpRequiredText = mcptools.RequiredText + mcpOptionalString = mcptools.OptionalString + mcpOptionalText = mcptools.OptionalText + mcpStringDefault = mcptools.StringDefault + mcpBool = mcptools.Bool + mcpInt = mcptools.Int + mcpOptionalInt = mcptools.OptionalInt + mcpOptionalInt64 = mcptools.OptionalInt64 + mcpOptionalStringSlice = mcptools.OptionalStringSlice + decodeMCPArgs = mcptools.DecodeArgs + textSHA256 = mcptools.TextSHA256 + countTextMatches = mcptools.CountTextMatches + findSingleTextMatch = mcptools.FindSingleTextMatch + matchMatchesConstraints = mcptools.MatchMatchesConstraints + lineNumberAtOffset = mcptools.LineNumberAtOffset + textEndLine = mcptools.TextEndLine + applyMCPTextPatch = mcptools.ApplyTextPatch + insertOffsetForLine = mcptools.InsertOffsetForLine + deleteContentLines = mcptools.DeleteContentLines + splitTextLines = mcptools.SplitTextLines +) func (s *afsMCPServer) effectiveProfile() string { profile, err := controlplane.NormalizeMCPProfile(s.profile) @@ -2146,157 +2135,6 @@ func mcpHistoryDirection(raw string) (bool, error) { } } -func mcpRequiredString(args map[string]any, key string) (string, error) { - value, ok := args[key] - if !ok { - return "", fmt.Errorf("missing required argument %q", key) - } - text, ok := value.(string) - if !ok { - return "", fmt.Errorf("argument %q must be a string", key) - } - text = strings.TrimSpace(text) - if text == "" { - return "", fmt.Errorf("argument %q must not be empty", key) - } - return text, nil -} - -func mcpRequiredText(args map[string]any, key string, allowEmpty bool) (string, error) { - value, ok := args[key] - if 
!ok { - return "", fmt.Errorf("missing required argument %q", key) - } - text, ok := value.(string) - if !ok { - return "", fmt.Errorf("argument %q must be a string", key) - } - if !allowEmpty && strings.TrimSpace(text) == "" { - return "", fmt.Errorf("argument %q must not be empty", key) - } - return text, nil -} - -func mcpOptionalString(args map[string]any, key string) (string, error) { - value, ok := args[key] - if !ok || value == nil { - return "", nil - } - text, ok := value.(string) - if !ok { - return "", fmt.Errorf("argument %q must be a string", key) - } - return strings.TrimSpace(text), nil -} - -func mcpOptionalText(args map[string]any, key string) (string, error) { - value, ok := args[key] - if !ok || value == nil { - return "", nil - } - text, ok := value.(string) - if !ok { - return "", fmt.Errorf("argument %q must be a string", key) - } - return text, nil -} - -func mcpStringDefault(args map[string]any, key, fallback string) (string, error) { - value, err := mcpOptionalString(args, key) - if err != nil { - return "", err - } - if value == "" { - return fallback, nil - } - return value, nil -} - -func mcpBool(args map[string]any, key string, fallback bool) (bool, error) { - value, ok := args[key] - if !ok || value == nil { - return fallback, nil - } - switch v := value.(type) { - case bool: - return v, nil - default: - return false, fmt.Errorf("argument %q must be a boolean", key) - } -} - -func mcpInt(args map[string]any, key string, fallback int) (int, error) { - value, ok := args[key] - if !ok || value == nil { - return fallback, nil - } - switch v := value.(type) { - case float64: - return int(v), nil - case int: - return v, nil - case int64: - return int(v), nil - default: - return 0, fmt.Errorf("argument %q must be an integer", key) - } -} - -func mcpOptionalInt(args map[string]any, key string) (*int, error) { - value, ok := args[key] - if !ok || value == nil { - return nil, nil - } - intValue, err := mcpInt(args, key, 0) - if err != nil { - 
return nil, err - } - return &intValue, nil -} - -func mcpOptionalInt64(args map[string]any, key string) (*int64, error) { - value, ok := args[key] - if !ok || value == nil { - return nil, nil - } - switch v := value.(type) { - case float64: - result := int64(v) - return &result, nil - case int: - result := int64(v) - return &result, nil - case int64: - return &v, nil - default: - return nil, fmt.Errorf("argument %q must be an integer", key) - } -} - -func mcpOptionalStringSlice(args map[string]any, key string) (*[]string, error) { - value, ok := args[key] - if !ok || value == nil { - return nil, nil - } - switch typed := value.(type) { - case []string: - copied := append([]string(nil), typed...) - return &copied, nil - case []any: - values := make([]string, 0, len(typed)) - for _, item := range typed { - text, ok := item.(string) - if !ok { - return nil, fmt.Errorf("argument %q must be an array of strings", key) - } - values = append(values, text) - } - return &values, nil - default: - return nil, fmt.Errorf("argument %q must be an array of strings", key) - } -} - func applyWorkspaceVersioningPolicyPatchArgs(base controlplane.WorkspaceVersioningPolicy, args map[string]any) (controlplane.WorkspaceVersioningPolicy, error) { next := base @@ -2353,226 +2191,6 @@ func applyWorkspaceVersioningPolicyPatchArgs(base controlplane.WorkspaceVersioni return next, nil } -func decodeMCPArgs(args map[string]any, target any) error { - body, err := json.Marshal(args) - if err != nil { - return err - } - return json.Unmarshal(body, target) -} - -func textSHA256(content string) string { - sum := sha256.Sum256([]byte(content)) - return hex.EncodeToString(sum[:]) -} - -func countTextMatches(content, old, contextBefore, contextAfter string, startLine, endLine *int) int { - if old == "" { - return 0 - } - count := 0 - offset := 0 - for { - index := strings.Index(content[offset:], old) - if index < 0 { - break - } - matchStart := offset + index - matchEnd := matchStart + len(old) - match 
:= mcpTextMatch{ - Start: matchStart, - End: matchEnd, - StartLine: lineNumberAtOffset(content, matchStart), - EndLine: textEndLine(lineNumberAtOffset(content, matchStart), old), - } - if matchMatchesConstraints(content, match, contextBefore, contextAfter, startLine, endLine) { - count++ - } - offset = matchStart + len(old) - } - return count -} - -func findSingleTextMatch(content, old, contextBefore, contextAfter string, startLine, endLine *int) (mcpTextMatch, error) { - if old == "" { - return mcpTextMatch{}, errors.New("old text must not be empty") - } - var ( - match mcpTextMatch - found bool - offset int - count int - ) - for { - index := strings.Index(content[offset:], old) - if index < 0 { - break - } - matchStart := offset + index - matchEnd := matchStart + len(old) - current := mcpTextMatch{ - Start: matchStart, - End: matchEnd, - StartLine: lineNumberAtOffset(content, matchStart), - EndLine: textEndLine(lineNumberAtOffset(content, matchStart), old), - } - if matchMatchesConstraints(content, current, contextBefore, contextAfter, startLine, endLine) { - match = current - found = true - count++ - } - offset = matchStart + len(old) - } - switch { - case !found: - return mcpTextMatch{}, errors.New("target text not found with the requested constraints") - case count > 1: - return mcpTextMatch{}, fmt.Errorf("target text matched %d times; refine with start_line or surrounding context", count) - default: - return match, nil - } -} - -func matchMatchesConstraints(content string, match mcpTextMatch, contextBefore, contextAfter string, startLine, endLine *int) bool { - if startLine != nil && match.StartLine != *startLine { - return false - } - if endLine != nil && match.EndLine != *endLine { - return false - } - if contextBefore != "" && !strings.HasSuffix(content[:match.Start], contextBefore) { - return false - } - if contextAfter != "" && !strings.HasPrefix(content[match.End:], contextAfter) { - return false - } - return true -} - -func lineNumberAtOffset(content 
string, offset int) int { - if offset <= 0 { - return 1 - } - return strings.Count(content[:offset], "\n") + 1 -} - -func textEndLine(startLine int, text string) int { - if text == "" { - return startLine - } - newlineCount := strings.Count(text, "\n") - if newlineCount == 0 { - return startLine - } - if strings.HasSuffix(text, "\n") { - return startLine + newlineCount - 1 - } - return startLine + newlineCount -} - -func applyMCPTextPatch(content string, patch mcpFilePatchOp) (string, map[string]any, error) { - switch patch.Op { - case "replace": - if patch.Old == "" { - return "", nil, errors.New("replace patch requires old") - } - match, err := findSingleTextMatch(content, patch.Old, patch.ContextBefore, patch.ContextAfter, patch.StartLine, patch.EndLine) - if err != nil { - return "", nil, err - } - return content[:match.Start] + patch.New + content[match.End:], map[string]any{ - "op": patch.Op, - "start_line": match.StartLine, - "end_line": match.EndLine, - }, nil - case "insert": - if patch.New == "" { - return "", nil, errors.New("insert patch requires new") - } - if patch.StartLine == nil { - return "", nil, errors.New("insert patch requires start_line") - } - insertOffset, actualLine, err := insertOffsetForLine(content, *patch.StartLine) - if err != nil { - return "", nil, err - } - if patch.ContextBefore != "" && !strings.HasSuffix(content[:insertOffset], patch.ContextBefore) { - return "", nil, errors.New("insert patch context_before did not match") - } - if patch.ContextAfter != "" && !strings.HasPrefix(content[insertOffset:], patch.ContextAfter) { - return "", nil, errors.New("insert patch context_after did not match") - } - return content[:insertOffset] + patch.New + content[insertOffset:], map[string]any{ - "op": patch.Op, - "start_line": actualLine, - }, nil - case "delete": - switch { - case patch.Old != "": - match, err := findSingleTextMatch(content, patch.Old, patch.ContextBefore, patch.ContextAfter, patch.StartLine, patch.EndLine) - if err != 
nil { - return "", nil, err - } - return content[:match.Start] + content[match.End:], map[string]any{ - "op": patch.Op, - "start_line": match.StartLine, - "end_line": match.EndLine, - }, nil - case patch.StartLine != nil && patch.EndLine != nil: - next, deleted, err := deleteContentLines(content, *patch.StartLine, *patch.EndLine) - if err != nil { - return "", nil, err - } - return next, map[string]any{ - "op": patch.Op, - "start_line": *patch.StartLine, - "end_line": *patch.EndLine, - "deleted_lines": deleted, - }, nil - default: - return "", nil, errors.New("delete patch requires old or both start_line and end_line") - } - default: - return "", nil, fmt.Errorf("unsupported patch op %q", patch.Op) - } -} - -func insertOffsetForLine(content string, startLine int) (int, int, error) { - if startLine < -1 { - return 0, 0, errors.New("start_line must be >= -1") - } - if startLine == -1 { - return len(content), -1, nil - } - if startLine == 0 { - return 0, 0, nil - } - lines := splitTextLines(content) - if startLine > len(lines) { - return 0, 0, fmt.Errorf("start_line %d is beyond EOF", startLine) - } - offset := 0 - for i := 0; i < startLine; i++ { - offset += len(lines[i]) - } - return offset, startLine, nil -} - -func deleteContentLines(content string, start, end int) (string, int, error) { - if start <= 0 || end < start { - return "", 0, errors.New("start_line and end_line must be >= 1 and end_line must be >= start_line") - } - lines := splitTextLines(content) - if start > len(lines) { - return "", 0, fmt.Errorf("start_line %d is beyond EOF", start) - } - if end > len(lines) { - end = len(lines) - } - next := strings.Join(append(lines[:start-1], lines[end:]...), "") - return next, end - start + 1, nil -} - func readLocalWorkspaceEntry(workspace, normalizedPath, localPath string, info os.FileInfo) (any, error) { response := map[string]any{ "workspace": workspace, @@ -2630,17 +2248,6 @@ func readTextWorkspaceFile(localPath, normalizedPath string) (string, os.FileInf 
return string(data), info, nil } -func splitTextLines(content string) []string { - if content == "" { - return []string{} - } - lines := strings.SplitAfter(content, "\n") - if len(lines) > 0 && lines[len(lines)-1] == "" { - lines = lines[:len(lines)-1] - } - return lines -} - func listLocalWorkspaceEntries(treePath, localPath, manifestPath string, depth int) ([]mcpFileListItem, error) { entries := make([]mcpFileListItem, 0) err := listLocalWorkspaceEntriesRecursive(treePath, localPath, manifestPath, depth, &entries) diff --git a/cmd/afs/sync_reconciler.go b/cmd/afs/sync_event_reconciler.go similarity index 100% rename from cmd/afs/sync_reconciler.go rename to cmd/afs/sync_event_reconciler.go diff --git a/cmd/afs/sync_reconciler_test.go b/cmd/afs/sync_event_reconciler_test.go similarity index 100% rename from cmd/afs/sync_reconciler_test.go rename to cmd/afs/sync_event_reconciler_test.go diff --git a/cmd/afs/sync_reconcile.go b/cmd/afs/sync_full_reconciler.go similarity index 100% rename from cmd/afs/sync_reconcile.go rename to cmd/afs/sync_full_reconciler.go diff --git a/deploy/vercel/README.md b/deploy/vercel/README.md index 9472f72..d6c5a75 100644 --- a/deploy/vercel/README.md +++ b/deploy/vercel/README.md @@ -20,14 +20,14 @@ What does not belong here: Current docs: -- [deployment-shape.md](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/deployment-shape.md) -- [onboarding-flows.md](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/onboarding-flows.md) -- [auth-plan.md](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/auth-plan.md) +- [deployment-shape.md](deployment-shape.md) +- Hosted onboarding plan: [../../plans/cloud-onboarding.md](../../plans/cloud-onboarding.md) +- Hosted auth plan: [../../plans/cloud-auth.md](../../plans/cloud-auth.md) Current wrapper: -- [main.go](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/main.go) is the thin Vercel-specific control-plane entrypoint used for preview boot/build validation. 
-- [preview.sh](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/preview.sh) stages a temporary Vercel build root and deploys a preview with the repo-root Go module intact. +- [main.go](main.go) is the thin Vercel-specific control-plane entrypoint used for preview boot/build validation. +- [preview.sh](preview.sh) stages a temporary Vercel build root and deploys a preview with the repo-root Go module intact. Preview workflow: @@ -47,7 +47,7 @@ Production workflow: Notes: - The script intentionally uses `npx --yes vercel@latest` so it does not collide with any local binary named `vercel`. -- If [deploy/vercel/.vercel/project.json](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/.vercel/project.json) exists locally, the script copies that link metadata into the temporary staging directory before deploy. +- If `.vercel/project.json` exists locally under this directory, the script copies that link metadata into the temporary staging directory before deploy. - If the project is not linked yet, pass `--scope --project ` and the script will link the staging directory before deploying. -- [smoke.sh](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/smoke.sh) uses `vercel curl` so it can hit protected preview deployments without needing a public share link. -- [prod.sh](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/prod.sh) deploys the same staged build root to production and can optionally try to attach a production alias. +- [smoke.sh](smoke.sh) uses `vercel curl` so it can hit protected preview deployments without needing a public share link. +- [prod.sh](prod.sh) deploys the same staged build root to production and can optionally try to attach a production alias. 
diff --git a/docs/internals/repo-walkthrough.md b/docs/internals/repo-walkthrough.md index 1603b5c..3b54ea6 100644 --- a/docs/internals/repo-walkthrough.md +++ b/docs/internals/repo-walkthrough.md @@ -1,6 +1,6 @@ # Agent Filesystem Repo Walkthrough -This guide is the "what lives where" map for the current `/Users/rowantrollope/git/agent-filesystem` working tree as of 2026-04-24. +This guide is the "what lives where" map for the current `agent-filesystem` working tree as of 2026-05-05. ## Scope @@ -72,6 +72,22 @@ Supporting areas: - Manifest scanning, blob collection, and local materialization helpers. +### `internal/rediscontent/` + +- Shared Redis content backend helpers: Array capability detection, chunked array IO, and `ARGREP` literal prefiltering used by mount and control-plane file-content paths. + +### `internal/searchindex/` + +- RediSearch index helpers used by grep and workspace catalog. + +### `internal/mcpproto/` + +- Shared MCP protocol types used by both the CLI and hosted MCP surfaces. + +### `internal/version/` + +- Build-time version metadata injected via `-ldflags` from the Makefile. + ### `mount/` - The Redis-backed filesystem client plus the FUSE and NFS daemons. 
diff --git a/example.afs.config.json b/example.afs.config.json index c8005a1..d8892bb 100644 --- a/example.afs.config.json +++ b/example.afs.config.json @@ -6,8 +6,23 @@ "db": 0, "tls": false }, - "controlPlane": {}, - "agent": {}, - "productMode": "local", - "mode": "sync" + "mode": "sync", + "currentWorkspace": "", + "localPath": "~/afs", + "mount": { + "backend": "none", + "readOnly": false, + "allowOther": false, + "mountBin": "", + "nfsBin": "", + "nfsHost": "127.0.0.1", + "nfsPort": 20490 + }, + "logs": { + "mount": "/tmp/afs-mount.log", + "sync": "/tmp/afs-sync.log" + }, + "sync": { + "fileSizeCapMB": 2048 + } } diff --git a/examples/codex-settings-migration.md b/examples/codex-settings-migration.md deleted file mode 100644 index cabf39b..0000000 --- a/examples/codex-settings-migration.md +++ /dev/null @@ -1,142 +0,0 @@ -# Share Codex State Across Computers with Agent Filesystem - -This guide shows how to put `~/.codex` into Agent Filesystem on one computer, then mount that same shared state on other computers so Codex keeps the same memory and settings everywhere. - -Use this when: - -- Codex stores local state in `~/.codex` -- you want the same state across multiple machines -- you usually use one machine at a time and want to resume cleanly when you switch - -## Recommended exclusions - -Before importing, create `~/.codex/.afsignore` to exclude machine-local or high-churn state you do not want to sync. - -Suggested starting point: - -```gitignore -# High-churn caches -cache/ -tmp/ - -# Local checkout state -worktrees/ - -# Local logs and temp files -logs/ -*.log -*.tmp -*.pid -*.sock -``` - -`worktrees/` is a good default exclusion. It is usually large, machine-local, and likely to cause confusion if multiple computers treat it as shared state. 
- -Because `.afsignore` uses `.gitignore`-style rules, you can also re-include a specific file with `!`, for example: - -```gitignore -*.log -!logs/important.log -``` - -## Machine 1: import the existing `~/.codex` - -Build Agent Filesystem: - -```bash -cd /path/to/agent-filesystem -make -``` - -Connect AFS to your shared Redis instance. - -Important setup choices: - -- choose your shared Redis host, password, and DB -- choose workspace name `.codex` -- choose mountpoint `~/.codex` - -Create or review the ignore file: - -```bash -cat > ~/.codex/.afsignore <<'EOF' -cache/ -tmp/ -worktrees/ -logs/ -*.log -*.tmp -*.pid -*.sock -EOF -``` - -Import the existing directory into the `.codex` workspace and mount it in place: - -```bash -./afs ws import --mount-at-source .codex ~/.codex -``` - -What that does: - -- imports `~/.codex` into the workspace `.codex` -- mounts the imported workspace at `~/.codex` - -Verify: - -```bash -./afs status -ls -la ~/.codex -``` - -## Machine 2 and later: mount the same shared Codex state - -On each additional computer: - -1. Build `agent-filesystem`. -2. Connect it to the same control plane or Redis instance. -3. Choose sync or mount mode. -4. Attach workspace `.codex` at `~/.codex`. - -Back up any existing local Codex directory first: - -```bash -if [ -d ~/.codex ]; then mv ~/.codex ~/.codex.local-backup; fi -mkdir -p ~/.codex -``` - -Then mount the shared workspace: - -```bash -./afs ws mount .codex ~/.codex -./afs status -ls -la ~/.codex -``` - -## Agent checklist - -If you want an agent to perform this, the agent should: - -1. Confirm Codex is not currently running on the machines involved. -2. Recommend creating `~/.codex/.afsignore` before import. -3. Suggest excluding `worktrees/` by default, unless the user explicitly wants local checkout state shared. -4. Build `agent-filesystem` with `make`. -5. On the first machine, connect AFS, then run `./afs ws import --mount-at-source .codex ~/.codex`. -6. 
On later machines, back up any existing `~/.codex`, connect AFS, then run `./afs ws mount .codex ~/.codex`. -7. Verify that the same Codex files appear on every machine. - -## Rollback - -Undo on the first computer: - -```bash -./afs ws unmount ~/.codex -``` - -Undo on a later computer: - -```bash -./afs ws unmount ~/.codex -rm -rf ~/.codex -mv ~/.codex.local-backup ~/.codex -``` diff --git a/internal/controlplane/file_versions.go b/internal/controlplane/file_versions.go index 0058c23..7558e87 100644 --- a/internal/controlplane/file_versions.go +++ b/internal/controlplane/file_versions.go @@ -1063,30 +1063,6 @@ func (s *Store) getFileLineageForCmd(ctx context.Context, cmd redis.Cmdable, sto return getJSON[FileLineage](ctx, cmd, fileLineageMetaKey(storageID, strings.TrimSpace(fileID))) } -func WorkspacePathFileIDsKey(workspace string) string { - return workspacePathFileIDsKey(workspace) -} - -func WorkspacePathHistoryKey(workspace, normalizedPath string) string { - return workspacePathHistoryKey(workspace, normalizedPath) -} - -func WorkspaceVersionFileIDsKey(workspace string) string { - return workspaceVersionFileIDsKey(workspace) -} - -func FileLineageMetaKey(workspace, fileID string) string { - return fileLineageMetaKey(workspace, fileID) -} - -func FileLineageVersionsKey(workspace, fileID string) string { - return fileLineageVersionsKey(workspace, fileID) -} - -func FileVersionKey(workspace, fileID, versionID string) string { - return fileVersionKey(workspace, fileID, versionID) -} - func workspacePathFileIDsKey(workspace string) string { return fmt.Sprintf("afs:{%s}:workspace:path_file_ids", workspace) } diff --git a/internal/controlplane/mcp_hosted.go b/internal/controlplane/mcp_hosted.go index 64a2762..e63aca4 100644 --- a/internal/controlplane/mcp_hosted.go +++ b/internal/controlplane/mcp_hosted.go @@ -32,22 +32,6 @@ func (p *hostedMCPProvider) isControlPlane() bool { return isControlPlaneScope(p.scope) } -type mcpFileListItem struct { - Path string 
`json:"path"` - Name string `json:"name"` - Kind string `json:"kind"` - Size int64 `json:"size"` - ModifiedAt string `json:"modified_at,omitempty"` - Target string `json:"target,omitempty"` -} - -type mcpGrepMatch struct { - Path string `json:"path"` - Line int64 `json:"line,omitempty"` - Text string `json:"text"` - Binary bool `json:"binary,omitempty"` -} - func authWrappedMCPHandler(manager *DatabaseManager, auth *AuthHandler) http.Handler { server := &mcpproto.Server{ ProtocolVersion: hostedMCPProtocolVersion, @@ -1715,90 +1699,6 @@ func mcpHistoryDirection(raw string) (bool, error) { } } -func mcpRequiredString(args map[string]any, key string) (string, error) { - value, ok := args[key] - if !ok { - return "", fmt.Errorf("missing required argument %q", key) - } - text, ok := value.(string) - if !ok { - return "", fmt.Errorf("argument %q must be a string", key) - } - text = strings.TrimSpace(text) - if text == "" { - return "", fmt.Errorf("argument %q must not be empty", key) - } - return text, nil -} - -func mcpRequiredText(args map[string]any, key string, allowEmpty bool) (string, error) { - value, ok := args[key] - if !ok { - return "", fmt.Errorf("missing required argument %q", key) - } - text, ok := value.(string) - if !ok { - return "", fmt.Errorf("argument %q must be a string", key) - } - if !allowEmpty && strings.TrimSpace(text) == "" { - return "", fmt.Errorf("argument %q must not be empty", key) - } - return text, nil -} - -func mcpOptionalString(args map[string]any, key string) (string, error) { - value, ok := args[key] - if !ok || value == nil { - return "", nil - } - text, ok := value.(string) - if !ok { - return "", fmt.Errorf("argument %q must be a string", key) - } - return strings.TrimSpace(text), nil -} - -func mcpStringDefault(args map[string]any, key, fallback string) (string, error) { - value, err := mcpOptionalString(args, key) - if err != nil { - return "", err - } - if value == "" { - return fallback, nil - } - return value, nil -} - -func 
mcpBool(args map[string]any, key string, fallback bool) (bool, error) { - value, ok := args[key] - if !ok || value == nil { - return fallback, nil - } - switch v := value.(type) { - case bool: - return v, nil - default: - return false, fmt.Errorf("argument %q must be a boolean", key) - } -} - -func mcpInt(args map[string]any, key string, fallback int) (int, error) { - value, ok := args[key] - if !ok || value == nil { - return fallback, nil - } - switch v := value.(type) { - case float64: - return int(v), nil - case int: - return v, nil - case int64: - return int(v), nil - default: - return 0, fmt.Errorf("argument %q must be an integer", key) - } -} - func grepBinaryPrefix(data []byte) bool { limit := len(data) if limit > 8192 { diff --git a/internal/controlplane/mcp_hosted_helpers.go b/internal/controlplane/mcp_hosted_helpers.go index 738694e..4fd67a1 100644 --- a/internal/controlplane/mcp_hosted_helpers.go +++ b/internal/controlplane/mcp_hosted_helpers.go @@ -2,46 +2,51 @@ package controlplane import ( "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" "fmt" "os" "path" "regexp" "strings" + "github.com/redis/agent-filesystem/internal/mcptools" afsclient "github.com/redis/agent-filesystem/mount/client" ) -type mcpFilePatchOp struct { - Op string `json:"op"` - StartLine *int `json:"start_line,omitempty"` - EndLine *int `json:"end_line,omitempty"` - Old string `json:"old,omitempty"` - New string `json:"new,omitempty"` - ContextBefore string `json:"context_before,omitempty"` - ContextAfter string `json:"context_after,omitempty"` -} - -type mcpFilePatchInput struct { - Path string `json:"path"` - ExpectedSHA256 string `json:"expected_sha256,omitempty"` - Patches []mcpFilePatchOp `json:"patches"` -} - -type mcpTextMatch struct { - Start int - End int - StartLine int - EndLine int -} +// Aliases for shared MCP helpers and types. The canonical definitions +// live in internal/mcptools — see that package for behavior. 
Local names +// are retained so existing call sites in this package keep working. +type ( + mcpFilePatchOp = mcptools.FilePatchOp + mcpFilePatchInput = mcptools.FilePatchInput + mcpTextMatch = mcptools.TextMatch + mcpGrepCount = mcptools.GrepCount + mcpGrepMatch = mcptools.GrepMatch + mcpFileListItem = mcptools.FileListItem +) -type mcpGrepCount struct { - Path string `json:"path"` - Count int `json:"count"` -} +var ( + mcpRequiredString = mcptools.RequiredString + mcpRequiredText = mcptools.RequiredText + mcpOptionalString = mcptools.OptionalString + mcpOptionalText = mcptools.OptionalText + mcpStringDefault = mcptools.StringDefault + mcpBool = mcptools.Bool + mcpInt = mcptools.Int + mcpOptionalInt = mcptools.OptionalInt + mcpOptionalInt64 = mcptools.OptionalInt64 + mcpOptionalStringSlice = mcptools.OptionalStringSlice + decodeMCPArgs = mcptools.DecodeArgs + textSHA256 = mcptools.TextSHA256 + countTextMatches = mcptools.CountTextMatches + findSingleTextMatch = mcptools.FindSingleTextMatch + matchMatchesConstraints = mcptools.MatchMatchesConstraints + lineNumberAtOffset = mcptools.LineNumberAtOffset + textEndLine = mcptools.TextEndLine + applyMCPTextPatch = mcptools.ApplyTextPatch + insertOffsetForLine = mcptools.InsertOffsetForLine + deleteContentLines = mcptools.DeleteContentLines + splitTextLines = mcptools.SplitTextLines +) type hostedMCPGrepOptions struct { path string @@ -86,73 +91,6 @@ type mcpWorkspaceVersioningPolicyPatch struct { LargeFileCutoffBytes *int64 } -func mcpOptionalText(args map[string]any, key string) (string, error) { - value, ok := args[key] - if !ok || value == nil { - return "", nil - } - text, ok := value.(string) - if !ok { - return "", fmt.Errorf("argument %q must be a string", key) - } - return text, nil -} - -func mcpOptionalInt(args map[string]any, key string) (*int, error) { - value, ok := args[key] - if !ok || value == nil { - return nil, nil - } - intValue, err := mcpInt(args, key, 0) - if err != nil { - return nil, err - } - 
return &intValue, nil -} - -func mcpOptionalInt64(args map[string]any, key string) (*int64, error) { - value, ok := args[key] - if !ok || value == nil { - return nil, nil - } - switch v := value.(type) { - case float64: - result := int64(v) - return &result, nil - case int: - result := int64(v) - return &result, nil - case int64: - return &v, nil - default: - return nil, fmt.Errorf("argument %q must be an integer", key) - } -} - -func mcpOptionalStringSlice(args map[string]any, key string) (*[]string, error) { - value, ok := args[key] - if !ok || value == nil { - return nil, nil - } - switch typed := value.(type) { - case []string: - copied := append([]string(nil), typed...) - return &copied, nil - case []any: - values := make([]string, 0, len(typed)) - for _, item := range typed { - text, ok := item.(string) - if !ok { - return nil, fmt.Errorf("argument %q must be an array of strings", key) - } - values = append(values, text) - } - return &values, nil - default: - return nil, fmt.Errorf("argument %q must be an array of strings", key) - } -} - func mcpWorkspaceVersioningPolicyPatchFromArgs(args map[string]any) (mcpWorkspaceVersioningPolicyPatch, error) { var patch mcpWorkspaceVersioningPolicyPatch @@ -223,237 +161,6 @@ func applyMCPWorkspaceVersioningPolicyPatch(base WorkspaceVersioningPolicy, patc return next } -func decodeMCPArgs(args map[string]any, target any) error { - body, err := json.Marshal(args) - if err != nil { - return err - } - return json.Unmarshal(body, target) -} - -func textSHA256(content string) string { - sum := sha256.Sum256([]byte(content)) - return hex.EncodeToString(sum[:]) -} - -func countTextMatches(content, old, contextBefore, contextAfter string, startLine, endLine *int) int { - if old == "" { - return 0 - } - count := 0 - offset := 0 - for { - index := strings.Index(content[offset:], old) - if index < 0 { - break - } - matchStart := offset + index - matchEnd := matchStart + len(old) - match := mcpTextMatch{ - Start: matchStart, - End: 
matchEnd, - StartLine: lineNumberAtOffset(content, matchStart), - EndLine: textEndLine(lineNumberAtOffset(content, matchStart), old), - } - if matchMatchesConstraints(content, match, contextBefore, contextAfter, startLine, endLine) { - count++ - } - offset = matchStart + len(old) - } - return count -} - -func findSingleTextMatch(content, old, contextBefore, contextAfter string, startLine, endLine *int) (mcpTextMatch, error) { - if old == "" { - return mcpTextMatch{}, errors.New("old text must not be empty") - } - var ( - match mcpTextMatch - found bool - offset int - count int - ) - for { - index := strings.Index(content[offset:], old) - if index < 0 { - break - } - matchStart := offset + index - matchEnd := matchStart + len(old) - current := mcpTextMatch{ - Start: matchStart, - End: matchEnd, - StartLine: lineNumberAtOffset(content, matchStart), - EndLine: textEndLine(lineNumberAtOffset(content, matchStart), old), - } - if matchMatchesConstraints(content, current, contextBefore, contextAfter, startLine, endLine) { - match = current - found = true - count++ - } - offset = matchStart + len(old) - } - switch { - case !found: - return mcpTextMatch{}, errors.New("target text not found with the requested constraints") - case count > 1: - return mcpTextMatch{}, fmt.Errorf("target text matched %d times; refine with start_line or surrounding context", count) - default: - return match, nil - } -} - -func matchMatchesConstraints(content string, match mcpTextMatch, contextBefore, contextAfter string, startLine, endLine *int) bool { - if startLine != nil && match.StartLine != *startLine { - return false - } - if endLine != nil && match.EndLine != *endLine { - return false - } - if contextBefore != "" && !strings.HasSuffix(content[:match.Start], contextBefore) { - return false - } - if contextAfter != "" && !strings.HasPrefix(content[match.End:], contextAfter) { - return false - } - return true -} - -func lineNumberAtOffset(content string, offset int) int { - if offset <= 0 { - 
return 1 - } - return strings.Count(content[:offset], "\n") + 1 -} - -func textEndLine(startLine int, text string) int { - if text == "" { - return startLine - } - newlineCount := strings.Count(text, "\n") - if newlineCount == 0 { - return startLine - } - if strings.HasSuffix(text, "\n") { - return startLine + newlineCount - 1 - } - return startLine + newlineCount -} - -func applyMCPTextPatch(content string, patch mcpFilePatchOp) (string, map[string]any, error) { - switch patch.Op { - case "replace": - if patch.Old == "" { - return "", nil, errors.New("replace patch requires old") - } - match, err := findSingleTextMatch(content, patch.Old, patch.ContextBefore, patch.ContextAfter, patch.StartLine, patch.EndLine) - if err != nil { - return "", nil, err - } - return content[:match.Start] + patch.New + content[match.End:], map[string]any{ - "op": patch.Op, - "start_line": match.StartLine, - "end_line": match.EndLine, - }, nil - case "insert": - if patch.New == "" { - return "", nil, errors.New("insert patch requires new") - } - if patch.StartLine == nil { - return "", nil, errors.New("insert patch requires start_line") - } - insertOffset, actualLine, err := insertOffsetForLine(content, *patch.StartLine) - if err != nil { - return "", nil, err - } - if patch.ContextBefore != "" && !strings.HasSuffix(content[:insertOffset], patch.ContextBefore) { - return "", nil, errors.New("insert patch context_before did not match") - } - if patch.ContextAfter != "" && !strings.HasPrefix(content[insertOffset:], patch.ContextAfter) { - return "", nil, errors.New("insert patch context_after did not match") - } - return content[:insertOffset] + patch.New + content[insertOffset:], map[string]any{ - "op": patch.Op, - "start_line": actualLine, - }, nil - case "delete": - switch { - case patch.Old != "": - match, err := findSingleTextMatch(content, patch.Old, patch.ContextBefore, patch.ContextAfter, patch.StartLine, patch.EndLine) - if err != nil { - return "", nil, err - } - return 
content[:match.Start] + content[match.End:], map[string]any{ - "op": patch.Op, - "start_line": match.StartLine, - "end_line": match.EndLine, - }, nil - case patch.StartLine != nil && patch.EndLine != nil: - next, deleted, err := deleteContentLines(content, *patch.StartLine, *patch.EndLine) - if err != nil { - return "", nil, err - } - return next, map[string]any{ - "op": patch.Op, - "start_line": *patch.StartLine, - "end_line": *patch.EndLine, - "deleted_lines": deleted, - }, nil - default: - return "", nil, errors.New("delete patch requires old or both start_line and end_line") - } - default: - return "", nil, fmt.Errorf("unsupported patch op %q", patch.Op) - } -} - -func insertOffsetForLine(content string, startLine int) (int, int, error) { - if startLine < -1 { - return 0, 0, errors.New("start_line must be >= -1") - } - if startLine == -1 { - return len(content), -1, nil - } - if startLine == 0 { - return 0, 0, nil - } - lines := splitTextLines(content) - if startLine > len(lines) { - return 0, 0, fmt.Errorf("start_line %d is beyond EOF", startLine) - } - offset := 0 - for i := 0; i < startLine; i++ { - offset += len(lines[i]) - } - return offset, startLine, nil -} - -func deleteContentLines(content string, start, end int) (string, int, error) { - if start <= 0 || end < start { - return "", 0, errors.New("start_line and end_line must be >= 1 and end_line must be >= start_line") - } - lines := splitTextLines(content) - if start > len(lines) { - return "", 0, fmt.Errorf("start_line %d is beyond EOF", start) - } - if end > len(lines) { - end = len(lines) - } - next := strings.Join(append(lines[:start-1], lines[end:]...), "") - return next, end - start + 1, nil -} - -func splitTextLines(content string) []string { - if content == "" { - return []string{} - } - lines := strings.SplitAfter(content, "\n") - if len(lines) > 0 && lines[len(lines)-1] == "" { - lines = lines[:len(lines)-1] - } - return lines -} - func readWorkspaceTextContent(ctx context.Context, fsClient 
afsclient.Client, normalizedPath string, stat *afsclient.StatResult) (string, error) { if stat.Type != "file" { return "", fmt.Errorf("path %q is not a regular file", normalizedPath) @@ -584,11 +291,11 @@ func hostedMCPGrepFileMatchCount(content []byte, opts hostedMCPGrepOptions, matc return count } -func hostedMCPCollectGrepMatches(filePath string, content []byte, opts hostedMCPGrepOptions, matcher *hostedMCPGrepMatcher) []mcpGrepMatch { +func hostedMCPCollectGrepMatches(filePath string, content []byte, opts hostedMCPGrepOptions, matcher *hostedMCPGrepMatcher) []mcptools.GrepMatch { if grepBinaryPrefix(content) { return nil } - out := make([]mcpGrepMatch, 0) + out := make([]mcptools.GrepMatch, 0) lines := splitTextLines(string(content)) for idx, line := range lines { text := strings.TrimSuffix(line, "\n") @@ -599,7 +306,7 @@ func hostedMCPCollectGrepMatches(filePath string, content []byte, opts hostedMCP if !matched { continue } - out = append(out, mcpGrepMatch{ + out = append(out, mcptools.GrepMatch{ Path: filePath, Line: int64(idx + 1), Text: text, diff --git a/internal/controlplane/service.go b/internal/controlplane/service.go index 24445d8..e52732a 100644 --- a/internal/controlplane/service.go +++ b/internal/controlplane/service.go @@ -375,7 +375,6 @@ type Service struct { catalogDatabaseName string } -type Capabilities = capabilities type WorkspaceSummary = workspaceSummary type WorkspaceListResponse = workspaceListResponse type CheckpointSummary = checkpointSummary diff --git a/internal/mcptools/mcptools.go b/internal/mcptools/mcptools.go new file mode 100644 index 0000000..0cf96a6 --- /dev/null +++ b/internal/mcptools/mcptools.go @@ -0,0 +1,501 @@ +// Package mcptools holds the text-patching, argument-parsing, and +// shared-shape helpers used by both MCP surfaces in this repo: +// +// - cmd/afs/afs_mcp.go — the local stdio MCP server +// - internal/controlplane/mcp_* — the hosted control-plane MCP +// +// These helpers were duplicated byte-for-byte across 
the two packages +// before this extraction. Changes here apply to both surfaces. +package mcptools + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "strings" +) + +// ----- Shared shapes ----- + +// FileListItem is a workspace file/dir entry returned by file_list and +// file_glob. +type FileListItem struct { + Path string `json:"path"` + Name string `json:"name"` + Kind string `json:"kind"` + Size int64 `json:"size"` + ModifiedAt string `json:"modified_at,omitempty"` + Target string `json:"target,omitempty"` +} + +// GrepMatch is a single grep hit returned by file_grep. +type GrepMatch struct { + Path string `json:"path"` + Line int64 `json:"line,omitempty"` + Text string `json:"text"` + Binary bool `json:"binary,omitempty"` +} + +// GrepCount is a per-file count returned by file_grep with count_only. +type GrepCount struct { + Path string `json:"path"` + Count int `json:"count"` +} + +// FilePatchOp describes one operation in a file_patch tool call. +type FilePatchOp struct { + Op string `json:"op"` + StartLine *int `json:"start_line,omitempty"` + EndLine *int `json:"end_line,omitempty"` + Old string `json:"old,omitempty"` + New string `json:"new,omitempty"` + ContextBefore string `json:"context_before,omitempty"` + ContextAfter string `json:"context_after,omitempty"` +} + +// FilePatchInput is the request body for the file_patch tool. +type FilePatchInput struct { + Path string `json:"path"` + ExpectedSHA256 string `json:"expected_sha256,omitempty"` + Patches []FilePatchOp `json:"patches"` +} + +// TextMatch is the position of a single matched substring within a file's +// text content. +type TextMatch struct { + Start int + End int + StartLine int + EndLine int +} + +// ----- Argument parsers ----- + +// RequiredString returns a non-empty trimmed string, or an error if the +// argument is missing, the wrong type, or blank after trimming. 
+func RequiredString(args map[string]any, key string) (string, error) { + value, ok := args[key] + if !ok { + return "", fmt.Errorf("missing required argument %q", key) + } + text, ok := value.(string) + if !ok { + return "", fmt.Errorf("argument %q must be a string", key) + } + text = strings.TrimSpace(text) + if text == "" { + return "", fmt.Errorf("argument %q must not be empty", key) + } + return text, nil +} + +// RequiredText returns a string argument without trimming. When +// allowEmpty is false, blank-after-trimming counts as missing. +func RequiredText(args map[string]any, key string, allowEmpty bool) (string, error) { + value, ok := args[key] + if !ok { + return "", fmt.Errorf("missing required argument %q", key) + } + text, ok := value.(string) + if !ok { + return "", fmt.Errorf("argument %q must be a string", key) + } + if !allowEmpty && strings.TrimSpace(text) == "" { + return "", fmt.Errorf("argument %q must not be empty", key) + } + return text, nil +} + +// OptionalString returns a trimmed string, "" if the argument is absent. +func OptionalString(args map[string]any, key string) (string, error) { + value, ok := args[key] + if !ok || value == nil { + return "", nil + } + text, ok := value.(string) + if !ok { + return "", fmt.Errorf("argument %q must be a string", key) + } + return strings.TrimSpace(text), nil +} + +// OptionalText returns the raw string without trimming, "" if absent. +func OptionalText(args map[string]any, key string) (string, error) { + value, ok := args[key] + if !ok || value == nil { + return "", nil + } + text, ok := value.(string) + if !ok { + return "", fmt.Errorf("argument %q must be a string", key) + } + return text, nil +} + +// StringDefault returns OptionalString or fallback if blank. 
+func StringDefault(args map[string]any, key, fallback string) (string, error) { + value, err := OptionalString(args, key) + if err != nil { + return "", err + } + if value == "" { + return fallback, nil + } + return value, nil +} + +// Bool returns a boolean argument or fallback if absent. +func Bool(args map[string]any, key string, fallback bool) (bool, error) { + value, ok := args[key] + if !ok || value == nil { + return fallback, nil + } + switch v := value.(type) { + case bool: + return v, nil + default: + return false, fmt.Errorf("argument %q must be a boolean", key) + } +} + +// Int returns an integer argument or fallback if absent. Accepts JSON +// number variants (float64), Go int, and int64. +func Int(args map[string]any, key string, fallback int) (int, error) { + value, ok := args[key] + if !ok || value == nil { + return fallback, nil + } + switch v := value.(type) { + case float64: + return int(v), nil + case int: + return v, nil + case int64: + return int(v), nil + default: + return 0, fmt.Errorf("argument %q must be an integer", key) + } +} + +// OptionalInt returns a *int that is nil when the argument is absent. +func OptionalInt(args map[string]any, key string) (*int, error) { + value, ok := args[key] + if !ok || value == nil { + return nil, nil + } + intValue, err := Int(args, key, 0) + if err != nil { + return nil, err + } + return &intValue, nil +} + +// OptionalInt64 returns a *int64 that is nil when the argument is absent. +func OptionalInt64(args map[string]any, key string) (*int64, error) { + value, ok := args[key] + if !ok || value == nil { + return nil, nil + } + switch v := value.(type) { + case float64: + result := int64(v) + return &result, nil + case int: + result := int64(v) + return &result, nil + case int64: + return &v, nil + default: + return nil, fmt.Errorf("argument %q must be an integer", key) + } +} + +// OptionalStringSlice returns a *[]string that is nil when the argument +// is absent. Accepts both []string and []any. 
+func OptionalStringSlice(args map[string]any, key string) (*[]string, error) { + value, ok := args[key] + if !ok || value == nil { + return nil, nil + } + switch typed := value.(type) { + case []string: + copied := append([]string(nil), typed...) + return &copied, nil + case []any: + values := make([]string, 0, len(typed)) + for _, item := range typed { + text, ok := item.(string) + if !ok { + return nil, fmt.Errorf("argument %q must be an array of strings", key) + } + values = append(values, text) + } + return &values, nil + default: + return nil, fmt.Errorf("argument %q must be an array of strings", key) + } +} + +// DecodeArgs round-trips an args map through JSON to populate a typed +// target struct. Convenient for tools that accept a richer body shape. +func DecodeArgs(args map[string]any, target any) error { + body, err := json.Marshal(args) + if err != nil { + return err + } + return json.Unmarshal(body, target) +} + +// ----- Text-patching helpers ----- + +// TextSHA256 returns the lowercase hex SHA-256 of content. Used as the +// expected_sha256 verifier on file_patch. +func TextSHA256(content string) string { + sum := sha256.Sum256([]byte(content)) + return hex.EncodeToString(sum[:]) +} + +// CountTextMatches counts occurrences of old in content that satisfy the +// optional surrounding-context and line-range constraints. 
+func CountTextMatches(content, old, contextBefore, contextAfter string, startLine, endLine *int) int { + if old == "" { + return 0 + } + count := 0 + offset := 0 + for { + index := strings.Index(content[offset:], old) + if index < 0 { + break + } + matchStart := offset + index + matchEnd := matchStart + len(old) + match := TextMatch{ + Start: matchStart, + End: matchEnd, + StartLine: LineNumberAtOffset(content, matchStart), + EndLine: TextEndLine(LineNumberAtOffset(content, matchStart), old), + } + if MatchMatchesConstraints(content, match, contextBefore, contextAfter, startLine, endLine) { + count++ + } + offset = matchStart + len(old) + } + return count +} + +// FindSingleTextMatch locates the unique occurrence of old satisfying the +// constraints, returning an error if zero or more than one match. +func FindSingleTextMatch(content, old, contextBefore, contextAfter string, startLine, endLine *int) (TextMatch, error) { + if old == "" { + return TextMatch{}, errors.New("old text must not be empty") + } + var ( + match TextMatch + found bool + offset int + count int + ) + for { + index := strings.Index(content[offset:], old) + if index < 0 { + break + } + matchStart := offset + index + matchEnd := matchStart + len(old) + current := TextMatch{ + Start: matchStart, + End: matchEnd, + StartLine: LineNumberAtOffset(content, matchStart), + EndLine: TextEndLine(LineNumberAtOffset(content, matchStart), old), + } + if MatchMatchesConstraints(content, current, contextBefore, contextAfter, startLine, endLine) { + match = current + found = true + count++ + } + offset = matchStart + len(old) + } + switch { + case !found: + return TextMatch{}, errors.New("target text not found with the requested constraints") + case count > 1: + return TextMatch{}, fmt.Errorf("target text matched %d times; refine with start_line or surrounding context", count) + default: + return match, nil + } +} + +// MatchMatchesConstraints reports whether match satisfies the optional +// startLine, 
endLine, contextBefore, contextAfter constraints. +func MatchMatchesConstraints(content string, match TextMatch, contextBefore, contextAfter string, startLine, endLine *int) bool { + if startLine != nil && match.StartLine != *startLine { + return false + } + if endLine != nil && match.EndLine != *endLine { + return false + } + if contextBefore != "" && !strings.HasSuffix(content[:match.Start], contextBefore) { + return false + } + if contextAfter != "" && !strings.HasPrefix(content[match.End:], contextAfter) { + return false + } + return true +} + +// LineNumberAtOffset returns the 1-indexed line number containing the +// given byte offset in content. +func LineNumberAtOffset(content string, offset int) int { + if offset <= 0 { + return 1 + } + return strings.Count(content[:offset], "\n") + 1 +} + +// TextEndLine returns the 1-indexed line number that contains the last +// character of text when it begins on startLine. +func TextEndLine(startLine int, text string) int { + if text == "" { + return startLine + } + newlineCount := strings.Count(text, "\n") + if newlineCount == 0 { + return startLine + } + if strings.HasSuffix(text, "\n") { + return startLine + newlineCount - 1 + } + return startLine + newlineCount +} + +// ApplyTextPatch applies one FilePatchOp to content and returns the new +// content along with a metadata map describing what changed. 
+func ApplyTextPatch(content string, patch FilePatchOp) (string, map[string]any, error) { + switch patch.Op { + case "replace": + if patch.Old == "" { + return "", nil, errors.New("replace patch requires old") + } + match, err := FindSingleTextMatch(content, patch.Old, patch.ContextBefore, patch.ContextAfter, patch.StartLine, patch.EndLine) + if err != nil { + return "", nil, err + } + return content[:match.Start] + patch.New + content[match.End:], map[string]any{ + "op": patch.Op, + "start_line": match.StartLine, + "end_line": match.EndLine, + }, nil + case "insert": + if patch.New == "" { + return "", nil, errors.New("insert patch requires new") + } + if patch.StartLine == nil { + return "", nil, errors.New("insert patch requires start_line") + } + insertOffset, actualLine, err := InsertOffsetForLine(content, *patch.StartLine) + if err != nil { + return "", nil, err + } + if patch.ContextBefore != "" && !strings.HasSuffix(content[:insertOffset], patch.ContextBefore) { + return "", nil, errors.New("insert patch context_before did not match") + } + if patch.ContextAfter != "" && !strings.HasPrefix(content[insertOffset:], patch.ContextAfter) { + return "", nil, errors.New("insert patch context_after did not match") + } + return content[:insertOffset] + patch.New + content[insertOffset:], map[string]any{ + "op": patch.Op, + "start_line": actualLine, + }, nil + case "delete": + switch { + case patch.Old != "": + match, err := FindSingleTextMatch(content, patch.Old, patch.ContextBefore, patch.ContextAfter, patch.StartLine, patch.EndLine) + if err != nil { + return "", nil, err + } + return content[:match.Start] + content[match.End:], map[string]any{ + "op": patch.Op, + "start_line": match.StartLine, + "end_line": match.EndLine, + }, nil + case patch.StartLine != nil && patch.EndLine != nil: + next, deleted, err := DeleteContentLines(content, *patch.StartLine, *patch.EndLine) + if err != nil { + return "", nil, err + } + return next, map[string]any{ + "op": patch.Op, + 
"start_line": *patch.StartLine, + "end_line": *patch.EndLine, + "deleted_lines": deleted, + }, nil + default: + return "", nil, errors.New("delete patch requires old or both start_line and end_line") + } + default: + return "", nil, fmt.Errorf("unsupported patch op %q", patch.Op) + } +} + +// InsertOffsetForLine returns the byte offset and resolved line number +// at which a startLine-positioned insert should land. +// +// startLine semantics: +// - 0 → insert at byte 0 (before everything) +// - -1 → insert at end of file +// - >0 → insert before line N, where lines are 1-indexed +func InsertOffsetForLine(content string, startLine int) (int, int, error) { + if startLine < -1 { + return 0, 0, errors.New("start_line must be >= -1") + } + if startLine == -1 { + return len(content), -1, nil + } + if startLine == 0 { + return 0, 0, nil + } + lines := SplitTextLines(content) + if startLine > len(lines) { + return 0, 0, fmt.Errorf("start_line %d is beyond EOF", startLine) + } + offset := 0 + for i := 0; i < startLine; i++ { + offset += len(lines[i]) + } + return offset, startLine, nil +} + +// DeleteContentLines removes lines [start..end] (inclusive, 1-indexed) +// from content and returns the new content along with the count of lines +// actually removed. +func DeleteContentLines(content string, start, end int) (string, int, error) { + if start <= 0 || end < start { + return "", 0, errors.New("start_line and end_line must be >= 1 and end_line must be >= start_line") + } + lines := SplitTextLines(content) + if start > len(lines) { + return "", 0, fmt.Errorf("start_line %d is beyond EOF", start) + } + if end > len(lines) { + end = len(lines) + } + next := strings.Join(append(lines[:start-1], lines[end:]...), "") + return next, end - start + 1, nil +} + +// SplitTextLines splits content into individual lines preserving +// trailing newlines, so reassembly via strings.Join(..., "") returns +// the original text exactly. 
+func SplitTextLines(content string) []string { + if content == "" { + return []string{} + } + lines := strings.SplitAfter(content, "\n") + if len(lines) > 0 && lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} diff --git a/internal/mcptools/mcptools_test.go b/internal/mcptools/mcptools_test.go new file mode 100644 index 0000000..9045767 --- /dev/null +++ b/internal/mcptools/mcptools_test.go @@ -0,0 +1,161 @@ +package mcptools + +import "testing" + +func TestRequiredString(t *testing.T) { + got, err := RequiredString(map[string]any{"k": " hi "}, "k") + if err != nil { + t.Fatalf("RequiredString returned err: %v", err) + } + if got != "hi" { + t.Fatalf("RequiredString = %q, want %q", got, "hi") + } + + if _, err := RequiredString(map[string]any{}, "k"); err == nil { + t.Fatal("RequiredString missing-key returned no error") + } + if _, err := RequiredString(map[string]any{"k": " "}, "k"); err == nil { + t.Fatal("RequiredString blank returned no error") + } + if _, err := RequiredString(map[string]any{"k": 42}, "k"); err == nil { + t.Fatal("RequiredString wrong-type returned no error") + } +} + +func TestOptionalIntCoerces(t *testing.T) { + cases := []struct { + name string + val any + want int + }{ + {"float64", float64(7), 7}, + {"int", int(7), 7}, + {"int64", int64(7), 7}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + ptr, err := OptionalInt(map[string]any{"k": tc.val}, "k") + if err != nil { + t.Fatalf("OptionalInt(%v) err: %v", tc.val, err) + } + if ptr == nil || *ptr != tc.want { + t.Fatalf("OptionalInt(%v) = %v, want %d", tc.val, ptr, tc.want) + } + }) + } + + ptr, err := OptionalInt(map[string]any{}, "k") + if err != nil || ptr != nil { + t.Fatalf("OptionalInt absent: ptr=%v err=%v", ptr, err) + } +} + +func TestOptionalStringSliceAcceptsBothShapes(t *testing.T) { + ptr, err := OptionalStringSlice(map[string]any{"k": []string{"a", "b"}}, "k") + if err != nil || ptr == nil || len(*ptr) != 2 || (*ptr)[0] 
!= "a" { + t.Fatalf("[]string shape: ptr=%v err=%v", ptr, err) + } + + ptr, err = OptionalStringSlice(map[string]any{"k": []any{"a", "b"}}, "k") + if err != nil || ptr == nil || len(*ptr) != 2 || (*ptr)[1] != "b" { + t.Fatalf("[]any shape: ptr=%v err=%v", ptr, err) + } + + if _, err := OptionalStringSlice(map[string]any{"k": []any{1, 2}}, "k"); err == nil { + t.Fatal("OptionalStringSlice with non-strings returned no error") + } +} + +func TestSplitTextLinesPreservesNewlines(t *testing.T) { + in := "a\nb\nc" + lines := SplitTextLines(in) + if len(lines) != 3 { + t.Fatalf("got %d lines, want 3", len(lines)) + } + // Reassembly should round-trip exactly. + rebuilt := "" + for _, l := range lines { + rebuilt += l + } + if rebuilt != in { + t.Fatalf("round-trip mismatch: got %q want %q", rebuilt, in) + } + + // Trailing newline shouldn't add a phantom empty line. + lines = SplitTextLines("a\n") + if len(lines) != 1 || lines[0] != "a\n" { + t.Fatalf("a\\n -> %v, want [\"a\\n\"]", lines) + } + + if got := SplitTextLines(""); len(got) != 0 { + t.Fatalf("empty -> %v, want []", got) + } +} + +func TestApplyTextPatchReplace(t *testing.T) { + original := "hello world\nfoo bar\n" + out, meta, err := ApplyTextPatch(original, FilePatchOp{ + Op: "replace", + Old: "world", + New: "everyone", + }) + if err != nil { + t.Fatalf("ApplyTextPatch returned err: %v", err) + } + want := "hello everyone\nfoo bar\n" + if out != want { + t.Fatalf("ApplyTextPatch = %q, want %q", out, want) + } + if meta["op"] != "replace" { + t.Fatalf("meta[op] = %v, want replace", meta["op"]) + } +} + +func TestApplyTextPatchAmbiguousReplaceErrors(t *testing.T) { + _, _, err := ApplyTextPatch("foo\nfoo\n", FilePatchOp{Op: "replace", Old: "foo", New: "bar"}) + if err == nil { + t.Fatal("expected error for ambiguous replace, got nil") + } +} + +func TestApplyTextPatchInsertAtLine(t *testing.T) { + original := "line1\nline2\nline3\n" + startLine := 1 + out, _, err := ApplyTextPatch(original, FilePatchOp{ + Op: 
"insert", + StartLine: &startLine, + New: "inserted\n", + }) + if err != nil { + t.Fatalf("ApplyTextPatch returned err: %v", err) + } + want := "line1\ninserted\nline2\nline3\n" + if out != want { + t.Fatalf("ApplyTextPatch = %q, want %q", out, want) + } +} + +func TestApplyTextPatchDeleteLines(t *testing.T) { + original := "line1\nline2\nline3\nline4\n" + start, end := 2, 3 + out, _, err := ApplyTextPatch(original, FilePatchOp{ + Op: "delete", + StartLine: &start, + EndLine: &end, + }) + if err != nil { + t.Fatalf("ApplyTextPatch returned err: %v", err) + } + want := "line1\nline4\n" + if out != want { + t.Fatalf("ApplyTextPatch = %q, want %q", out, want) + } +} + +func TestTextSHA256IsStable(t *testing.T) { + got := TextSHA256("hello") + want := "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824" + if got != want { + t.Fatalf("TextSHA256(%q) = %s, want %s", "hello", got, want) + } +} diff --git a/mount/internal/afsfs/errors.go b/mount/internal/afsfs/errors.go index 6ddc7e0..dfc83cd 100644 --- a/mount/internal/afsfs/errors.go +++ b/mount/internal/afsfs/errors.go @@ -1,17 +1,44 @@ package afsfs import ( + "errors" "strings" "syscall" + + "github.com/redis/agent-filesystem/mount/internal/client" ) -// mapError maps an AFS error to a syscall errno. +// mapError maps an AFS client error to a syscall errno. Sentinel checks +// (errors.Is) take precedence so a wrapped error +// (fmt.Errorf("...: %w", client.ErrNotFile)) still maps correctly. The +// substring fallback exists because some errors arrive from outside the +// client package — Redis "ERR ..." replies, redis-go errors, etc. — that +// can't carry one of our sentinels but still mention the same condition. 
func mapError(err error) syscall.Errno { if err == nil { return 0 } - msg := err.Error() + switch { + case errors.Is(err, client.ErrNotFound): + return syscall.ENOENT + case errors.Is(err, client.ErrNotFile), + errors.Is(err, client.ErrCannotWriteRoot): + return syscall.EISDIR + case errors.Is(err, client.ErrNotDir), + errors.Is(err, client.ErrParentConflict): + return syscall.ENOTDIR + case errors.Is(err, client.ErrAlreadyExists): + return syscall.EEXIST + case errors.Is(err, client.ErrDirNotEmpty): + return syscall.ENOTEMPTY + case errors.Is(err, client.ErrUnsupported): + return syscall.ENOTSUP + case errors.Is(err, client.ErrLockWouldBlock): + return syscall.EAGAIN + } + + msg := err.Error() switch { case strings.Contains(msg, "no such filesystem key"), strings.Contains(msg, "no such file or directory"), diff --git a/mount/internal/afsfs/errors_test.go b/mount/internal/afsfs/errors_test.go index 940d2ca..61bff6e 100644 --- a/mount/internal/afsfs/errors_test.go +++ b/mount/internal/afsfs/errors_test.go @@ -2,8 +2,11 @@ package afsfs import ( "errors" + "fmt" "syscall" "testing" + + "github.com/redis/agent-filesystem/mount/internal/client" ) func TestMapError(t *testing.T) { @@ -32,3 +35,33 @@ func TestMapError(t *testing.T) { } } } + +// TestMapErrorSentinels confirms mapError matches client sentinels via +// errors.Is, including when the caller wraps the sentinel with extra +// context. The substring fallback would silently break under wrapping; +// this test ensures the principled path doesn't. 
+func TestMapErrorSentinels(t *testing.T) { + cases := []struct { + name string + err error + want syscall.Errno + }{ + {"NotFound", client.ErrNotFound, syscall.ENOENT}, + {"NotFile", client.ErrNotFile, syscall.EISDIR}, + {"NotDir", client.ErrNotDir, syscall.ENOTDIR}, + {"AlreadyExists", client.ErrAlreadyExists, syscall.EEXIST}, + {"DirNotEmpty", client.ErrDirNotEmpty, syscall.ENOTEMPTY}, + {"Unsupported", client.ErrUnsupported, syscall.ENOTSUP}, + {"LockWouldBlock", client.ErrLockWouldBlock, syscall.EAGAIN}, + {"WrappedNotFile", fmt.Errorf("inode 42: %w", client.ErrNotFile), syscall.EISDIR}, + {"WrappedAlreadyExists", fmt.Errorf("create %q: %w", "/foo", client.ErrAlreadyExists), syscall.EEXIST}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if got := mapError(tc.err); got != tc.want { + t.Fatalf("mapError(%v) = %d, want %d", tc.err, got, tc.want) + } + }) + } +} diff --git a/mount/internal/afsfs/file.go b/mount/internal/afsfs/file.go index db754c1..176830c 100644 --- a/mount/internal/afsfs/file.go +++ b/mount/internal/afsfs/file.go @@ -32,7 +32,9 @@ func (n *FSNode) Create(ctx context.Context, name string, flags uint32, mode uin handle := newFileHandle(child.fsPath, st.Inode, n.client, child) if flags&syscall.O_TRUNC != 0 { - if err := n.client.TruncateInode(ctx, st.Inode, 0); err != nil { + // *AtPath updates the path cache in place; the no-path variant flushes + // the whole cache. + if err := n.client.TruncateInodeAtPath(ctx, st.Inode, child.fsPath, 0); err != nil { return nil, nil, 0, mapError(err) } n.root().invalidatePath(child.fsPath) @@ -58,7 +60,9 @@ func (n *FSNode) Open(ctx context.Context, flags uint32) (fs.FileHandle, uint32, handle := newFileHandle(n.fsPath, st.Inode, n.client, n) if flags&syscall.O_TRUNC != 0 { - if err := n.client.TruncateInode(ctx, st.Inode, 0); err != nil { + // *AtPath updates the path cache in place; the no-path variant flushes + // the whole cache. 
+ if err := n.client.TruncateInodeAtPath(ctx, st.Inode, n.fsPath, 0); err != nil { return nil, 0, mapError(err) } n.root().invalidatePath(n.fsPath) diff --git a/mount/internal/afsfs/fs.go b/mount/internal/afsfs/fs.go index b3e5855..25d4d24 100644 --- a/mount/internal/afsfs/fs.go +++ b/mount/internal/afsfs/fs.go @@ -389,27 +389,6 @@ func GetOwnership() (uint32, uint32) { return uint32(os.Getuid()), uint32(os.Getgid()) } -// parentPath returns the parent dir of a path. -func parentPath(p string) string { - if p == "/" { - return "/" - } - parent := filepath.Dir(p) - if parent == "." { - return "/" - } - return parent -} - -// baseName returns the last component of a path. -func baseName(p string) string { - if p == "/" { - return "" - } - parts := strings.Split(p, "/") - return parts[len(parts)-1] -} - // Ensure interfaces are satisfied. var _ fs.NodeStatfser = (*FSNode)(nil) var _ fs.NodeGetattrer = (*FSNode)(nil) diff --git a/mount/internal/afsfs/handle.go b/mount/internal/afsfs/handle.go index c31ad58..fde4cf9 100644 --- a/mount/internal/afsfs/handle.go +++ b/mount/internal/afsfs/handle.go @@ -51,7 +51,10 @@ func (fh *FileHandle) Write(ctx context.Context, data []byte, off int64) (uint32 fh.mu.Lock() defer fh.mu.Unlock() - if err := fh.client.WriteInodeAt(ctx, fh.inode, data, off); err != nil { + // Use *AtPath so the per-path attribute cache is updated in place rather + // than wiped. Without this, every FUSE write triggered a whole-cache + // invalidatePrefix("/") inside finishRangeWriteCache. 
+ if err := fh.client.WriteInodeAtPath(ctx, fh.inode, fh.path, data, off); err != nil { return 0, mapError(err) } fh.node.root().invalidatePath(fh.path) @@ -96,9 +99,6 @@ func (fh *FileHandle) Setlk(ctx context.Context, owner uint64, lk *fuse.FileLock if err == context.Canceled || err == context.DeadlineExceeded { return syscall.EINTR } - if err.Error() == "lock would block" { - return syscall.EAGAIN - } return mapError(err) } @@ -110,9 +110,6 @@ func (fh *FileHandle) Setlkw(ctx context.Context, owner uint64, lk *fuse.FileLoc if err == context.Canceled || err == context.DeadlineExceeded { return syscall.EINTR } - if err.Error() == "lock would block" { - return syscall.EAGAIN - } return mapError(err) } diff --git a/mount/internal/client/errors.go b/mount/internal/client/errors.go new file mode 100644 index 0000000..a13ea35 --- /dev/null +++ b/mount/internal/client/errors.go @@ -0,0 +1,23 @@ +package client + +import "errors" + +// Sentinel errors returned by the filesystem client. Callers (notably +// the FUSE/NFS error mappers) compare against these via errors.Is so a +// caller wrapping the error with extra context (fmt.Errorf("...: %w", err)) +// stays mappable — the previous strings.Contains(err.Error(), "...") +// approach silently broke as soon as a wrapper rephrased the message. 
+var ( + ErrNotFound = errors.New("no such file or directory") + ErrNotFile = errors.New("not a file") + ErrNotDir = errors.New("not a directory") + ErrNotSymlink = errors.New("not a symlink") + ErrAlreadyExists = errors.New("already exists") + ErrDirNotEmpty = errors.New("directory not empty") + ErrUnsupported = errors.New("operation not supported") + ErrCannotWriteRoot = errors.New("cannot write to root") + ErrCannotMoveRoot = errors.New("cannot move root") + ErrCannotRemoveRoot = errors.New("cannot remove root") + ErrParentConflict = errors.New("parent path conflict") + ErrLockWouldBlock = errors.New("lock would block") +) diff --git a/mount/internal/client/native_core.go b/mount/internal/client/native_core.go index 6e5a586..e7574fc 100644 --- a/mount/internal/client/native_core.go +++ b/mount/internal/client/native_core.go @@ -426,7 +426,7 @@ func (c *nativeClient) Cat(ctx context.Context, p string) ([]byte, error) { return nil, err } if inode.Type != "file" { - return nil, errors.New("not a file") + return nil, ErrNotFile } content, err := c.loadContentExternal(ctx, inode.ID, inode.ContentRef) if err != nil { @@ -450,7 +450,7 @@ func (c *nativeClient) EchoAppend(ctx context.Context, p string, data []byte) er func (c *nativeClient) Touch(ctx context.Context, p string) error { p = normalizePath(p) if p == "/" { - return errors.New("cannot write to root") + return ErrCannotWriteRoot } if err := c.ensureParents(ctx, p); err != nil { return err @@ -465,7 +465,7 @@ func (c *nativeClient) Touch(ctx context.Context, p string) error { return err } if inode.Type != "file" { - return errors.New("not a file") + return ErrNotFile } now := nowMs() inode.MtimeMs = now @@ -480,7 +480,7 @@ func (c *nativeClient) Touch(ctx context.Context, p string) error { func (c *nativeClient) CreateFile(ctx context.Context, p string, mode uint32, exclusive bool) (*StatResult, bool, error) { p = normalizePath(p) if p == "/" { - return nil, false, errors.New("cannot write to root") + return 
nil, false, ErrCannotWriteRoot } if err := c.ensureParents(ctx, p); err != nil { return nil, false, err @@ -522,7 +522,7 @@ func (c *nativeClient) Mkdir(ctx context.Context, p string) error { if existing.Type == "dir" { return nil } - return errors.New("already exists") + return ErrAlreadyExists } if err := c.createDir(ctx, p, 0o755); err != nil { return err @@ -533,7 +533,7 @@ func (c *nativeClient) Mkdir(ctx context.Context, p string) error { func (c *nativeClient) Rm(ctx context.Context, p string) error { p = normalizePath(p) if p == "/" { - return errors.New("cannot remove root") + return ErrCannotRemoveRoot } resolved, inode, err := c.resolvePath(ctx, p, false) if err != nil { @@ -573,7 +573,7 @@ func (c *nativeClient) Rm(ctx context.Context, p string) error { c.invalidateInode(ctx, resolved) } if liveChildren > 0 { - return errors.New("directory not empty") + return ErrDirNotEmpty } } } @@ -631,7 +631,7 @@ func (c *nativeClient) LsLong(ctx context.Context, p string) ([]LsEntry, error) return nil, err } if inode.Type != "dir" { - return nil, errors.New("not a directory") + return nil, ErrNotDir } children, err := c.listDirChildren(ctx, resolved, inode) @@ -659,16 +659,16 @@ func (c *nativeClient) Rename(ctx context.Context, src, dst string, flags uint32 src = normalizePath(src) dst = normalizePath(dst) if src == "/" { - return errors.New("cannot move root") + return ErrCannotMoveRoot } if src == dst { return nil } if flags&RenameExchange != 0 { - return errors.New("operation not supported") + return ErrUnsupported } if flags&^RenameNoreplace != 0 { - return errors.New("operation not supported") + return ErrUnsupported } resolvedSrc, srcInode, err := c.resolvePath(ctx, src, false) @@ -688,7 +688,7 @@ func (c *nativeClient) Rename(ctx context.Context, src, dst string, flags uint32 } _ = resolvedParent if newParent.Type != "dir" { - return errors.New("parent path conflict") + return ErrParentConflict } if err := c.renamePath(ctx, resolvedSrc, srcInode, dst, 
newParent, flags); err != nil { @@ -716,7 +716,7 @@ func (c *nativeClient) Mv(ctx context.Context, src, dst string) error { func (c *nativeClient) Ln(ctx context.Context, target, linkpath string) error { linkpath = normalizePath(linkpath) if linkpath == "/" { - return errors.New("already exists") + return ErrAlreadyExists } if err := c.ensureParents(ctx, linkpath); err != nil { return err @@ -726,7 +726,7 @@ func (c *nativeClient) Ln(ctx context.Context, target, linkpath string) error { return err } if existing != nil { - return errors.New("already exists") + return ErrAlreadyExists } now := nowMs() inode := &inodeData{ @@ -762,7 +762,7 @@ func (c *nativeClient) Readlink(ctx context.Context, p string) (string, error) { return "", err } if inode.Type != "symlink" { - return "", errors.New("not a symlink") + return "", ErrNotSymlink } return inode.Target, nil } @@ -823,7 +823,7 @@ func (c *nativeClient) Truncate(ctx context.Context, p string, size int64) error return snapErr } if inode.Type != "file" { - return errors.New("not a file") + return ErrNotFile } var content []byte @@ -978,7 +978,7 @@ func (c *nativeClient) Info(ctx context.Context) (*InfoResult, error) { func (c *nativeClient) writeFileWithMode(ctx context.Context, p string, data []byte, mode uint32) error { p = normalizePath(p) if p == "/" { - return errors.New("cannot write to root") + return ErrCannotWriteRoot } if err := c.ensureParents(ctx, p); err != nil { return err @@ -991,7 +991,7 @@ func (c *nativeClient) writeFileWithMode(ctx context.Context, p string, data []b return err } if inode.Type != "file" { - return errors.New("not a file") + return ErrNotFile } beforeSnapshot, snapErr := c.versionedSnapshotFromResolved(ctx, resolved, inode) if snapErr != nil { @@ -1023,7 +1023,7 @@ func (c *nativeClient) writeFileWithMode(ctx context.Context, p string, data []b func (c *nativeClient) writeFile(ctx context.Context, p string, data []byte, appendMode bool) error { p = normalizePath(p) if p == "/" { - 
return errors.New("cannot write to root") + return ErrCannotWriteRoot } resolved, inode, err := c.resolvePath(ctx, p, true) if err != nil { @@ -1036,7 +1036,7 @@ func (c *nativeClient) writeFile(ctx context.Context, p string, data []byte, app return err } if inode.Type != "file" { - return errors.New("not a file") + return ErrNotFile } beforeSnapshot, snapErr := c.versionedSnapshotFromResolved(ctx, resolved, inode) if snapErr != nil { @@ -1150,7 +1150,7 @@ func (c *nativeClient) WriteChunks(ctx context.Context, p string, chunks map[int return err } if inode.Type != "file" { - return errors.New("not a file") + return ErrNotFile } beforeSnapshot, snapErr := c.versionedSnapshotFromResolved(ctx, resolved, inode) if snapErr != nil { @@ -1328,7 +1328,7 @@ func (c *nativeClient) ReadChunks(ctx context.Context, p string, indices []int, return nil, err } if inode.Type != "file" { - return nil, errors.New("not a file") + return nil, ErrNotFile } if inode.ContentRef == rediscontent.RefArray { result := make(map[int][]byte, len(indices)) @@ -1372,7 +1372,7 @@ func (c *nativeClient) ChunkMeta(ctx context.Context, p string) (int, []string, return 0, nil, err } if inode.Type != "file" { - return 0, nil, errors.New("not a file") + return 0, nil, ErrNotFile } vals, err := c.rdb.HMGet(ctx, c.keys.inode(inode.ID), "chunk_size", "chunk_hashes").Result() diff --git a/mount/internal/client/native_helpers.go b/mount/internal/client/native_helpers.go index 609a466..61a9209 100644 --- a/mount/internal/client/native_helpers.go +++ b/mount/internal/client/native_helpers.go @@ -163,7 +163,7 @@ func (c *nativeClient) ensureParents(ctx context.Context, p string) error { return err } if child == nil || child.Type != "dir" { - return errors.New("parent path conflict") + return ErrParentConflict } c.cachePath(nextPath, child) curPath = nextPath @@ -292,7 +292,7 @@ func (c *nativeClient) resolvePath(ctx context.Context, p string, followFinal bo return nextPath, inode, nil } if inode.Type != "dir" { 
- return "", nil, errors.New("not a directory") + return "", nil, ErrNotDir } curPath = nextPath @@ -632,7 +632,7 @@ func (c *nativeClient) saveInodeDirect(ctx context.Context, inode *inodeData, in func (c *nativeClient) createInodeAtPath(ctx context.Context, p string, inode *inodeData, ensureParents bool) error { p = normalizePath(p) if p == "/" { - return errors.New("already exists") + return ErrAlreadyExists } if ensureParents { if err := c.ensureParents(ctx, p); err != nil { @@ -650,10 +650,10 @@ func (c *nativeClient) createInodeAtPath(ctx context.Context, p string, inode *i func (c *nativeClient) createInodeUnderParent(ctx context.Context, childPath string, parent *inodeData, name string, inode *inodeData) error { if parent == nil || parent.Type != "dir" { - return errors.New("parent path conflict") + return ErrParentConflict } if _, err := c.lookupChildID(ctx, parent.ID, name); err == nil { - return errors.New("already exists") + return ErrAlreadyExists } else if !errors.Is(err, redis.Nil) { return err } @@ -732,7 +732,7 @@ func (c *nativeClient) createInodeUnderParent(ctx context.Context, childPath str func (c *nativeClient) createFileIfMissing(ctx context.Context, p string, content string, mode uint32, exclusive bool) (*inodeData, bool, error) { p = normalizePath(p) if p == "/" { - return nil, false, errors.New("cannot write to root") + return nil, false, ErrCannotWriteRoot } parentPath := parentOf(p) _, parentInode, err := c.resolvePath(ctx, parentPath, true) @@ -740,7 +740,7 @@ func (c *nativeClient) createFileIfMissing(ctx context.Context, p string, conten return nil, false, err } if parentInode.Type != "dir" { - return nil, false, errors.New("parent path conflict") + return nil, false, ErrParentConflict } direntsKey := c.keys.dirents(parentInode.ID) @@ -818,7 +818,7 @@ func (c *nativeClient) createFileIfMissing(ctx context.Context, p string, conten } if exclusive { - return nil, false, errors.New("already exists") + return nil, false, ErrAlreadyExists 
} // Re-resolve the existing child. Use HGet directly (not lookupChildID) @@ -829,7 +829,7 @@ func (c *nativeClient) createFileIfMissing(ctx context.Context, p string, conten if errors.Is(err, redis.Nil) { // The winner removed the entry between HSETNX and our HGet. // Surface this as the same error the resolvePath family uses. - return nil, false, errors.New("no such file or directory") + return nil, false, ErrNotFound } return nil, false, err } @@ -838,10 +838,10 @@ func (c *nativeClient) createFileIfMissing(ctx context.Context, p string, conten return nil, false, err } if existing == nil { - return nil, false, errors.New("no such file or directory") + return nil, false, ErrNotFound } if existing.Type != "file" { - return nil, false, errors.New("not a file") + return nil, false, ErrNotFile } // Nothing in the parent directory actually changed from our // perspective, so we leave the parent's cache alone and only @@ -855,7 +855,7 @@ func (c *nativeClient) renamePath(ctx context.Context, resolvedSrc string, srcIn oldName := srcInode.Name newName := baseName(dst) if oldParentID == "" { - return errors.New("no such file or directory") + return ErrNotFound } var watchDstDirID string @@ -870,7 +870,7 @@ func (c *nativeClient) renamePath(ctx context.Context, resolvedSrc string, srcIn currentSrcID, err := tx.HGet(ctx, c.keys.dirents(oldParentID), oldName).Result() if err != nil { if errors.Is(err, redis.Nil) { - return errors.New("no such file or directory") + return ErrNotFound } return err } @@ -883,7 +883,7 @@ func (c *nativeClient) renamePath(ctx context.Context, resolvedSrc string, srcIn return err } if currentSrc == nil { - return errors.New("no such file or directory") + return ErrNotFound } var replaced *inodeData @@ -898,13 +898,13 @@ func (c *nativeClient) renamePath(ctx context.Context, resolvedSrc string, srcIn return redis.TxFailedErr } if flags&RenameNoreplace != 0 { - return errors.New("already exists") + return ErrAlreadyExists } if currentSrc.Type == 
"dir" && replaced.Type != "dir" { - return errors.New("not a directory") + return ErrNotDir } if currentSrc.Type != "dir" && replaced.Type == "dir" { - return errors.New("not a file") + return ErrNotFile } if replaced.Type == "dir" { if watchDstDirID != replaced.ID { @@ -916,7 +916,7 @@ func (c *nativeClient) renamePath(ctx context.Context, resolvedSrc string, srcIn return err } if count > 0 { - return errors.New("directory not empty") + return ErrDirNotEmpty } } case !errors.Is(err, redis.Nil): @@ -983,7 +983,7 @@ func (c *nativeClient) renamePath(ctx context.Context, resolvedSrc string, srcIn func (c *nativeClient) listDirChildren(ctx context.Context, dirPath string, dir *inodeData) ([]namedInode, error) { if dir == nil || dir.Type != "dir" { - return nil, errors.New("not a directory") + return nil, ErrNotDir } if c.cache != nil { if cached, ok := c.cache.Get(dirCacheKey(dirPath)); ok { diff --git a/mount/internal/client/native_locks.go b/mount/internal/client/native_locks.go index 1d3a90e..dc4f4e3 100644 --- a/mount/internal/client/native_locks.go +++ b/mount/internal/client/native_locks.go @@ -12,8 +12,6 @@ import ( "github.com/redis/go-redis/v9" ) -var errLockWouldBlock = errors.New("lock would block") - type storedFileLock struct { Start uint64 `json:"start"` End uint64 `json:"end"` @@ -64,7 +62,7 @@ func (c *nativeClient) Setlk(ctx context.Context, inode uint64, handleID string, switch { case err == nil: return nil - case errors.Is(err, errLockWouldBlock): + case errors.Is(err, ErrLockWouldBlock): select { case <-ctx.Done(): return ctx.Err() @@ -107,7 +105,7 @@ func (c *nativeClient) trySetLock(ctx context.Context, inode uint64, handleID st } for _, existing := range ownerLocks { if locksConflict(existing, *lk) { - return errLockWouldBlock + return ErrLockWouldBlock } } } diff --git a/mount/internal/client/native_range.go b/mount/internal/client/native_range.go index 4008e35..4d24c5c 100644 --- a/mount/internal/client/native_range.go +++ 
b/mount/internal/client/native_range.go @@ -32,10 +32,10 @@ func (c *nativeClient) ReadInodeAt(ctx context.Context, inode uint64, off int64, return nil, err } if data == nil { - return nil, errors.New("no such file or directory") + return nil, ErrNotFound } if data.Type != "file" { - return nil, errors.New("not a file") + return nil, ErrNotFile } if off >= data.Size { return []byte{}, nil @@ -93,10 +93,10 @@ func (c *nativeClient) WriteInodeAtPath(ctx context.Context, inode uint64, path return err } if data == nil { - return errors.New("no such file or directory") + return ErrNotFound } if data.Type != "file" { - return errors.New("not a file") + return ErrNotFile } beforeSnapshot, snapErr := c.versionedSnapshotForCurrentInode(ctx, data.ID, path) if snapErr != nil { @@ -246,10 +246,10 @@ func (c *nativeClient) TruncateInodeAtPath(ctx context.Context, inode uint64, pa return err } if data == nil { - return errors.New("no such file or directory") + return ErrNotFound } if data.Type != "file" { - return errors.New("not a file") + return ErrNotFile } beforeSnapshot, snapErr := c.versionedSnapshotForCurrentInode(ctx, data.ID, path) if snapErr != nil { diff --git a/mount/internal/nfsfs/fs.go b/mount/internal/nfsfs/fs.go index f270c14..cc39576 100644 --- a/mount/internal/nfsfs/fs.go +++ b/mount/internal/nfsfs/fs.go @@ -125,7 +125,7 @@ func (f *FS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, } created, _, err := f.client.CreateFile(ctx, p, uint32(perm.Perm()), flag&os.O_EXCL != 0) if err != nil { - if strings.Contains(err.Error(), "already exists") { + if errors.Is(err, client.ErrAlreadyExists) { return nil, os.ErrExist } return nil, err diff --git a/plans/redis-array-backend.md b/plans/archive/2026-05-05-redis-array-backend.md similarity index 99% rename from plans/redis-array-backend.md rename to plans/archive/2026-05-05-redis-array-backend.md index b444749..93824ce 100644 --- a/plans/redis-array-backend.md +++ 
b/plans/archive/2026-05-05-redis-array-backend.md @@ -1,9 +1,10 @@ # Redis Array Backend -Status: active +Status: archived Owner: codex Created: 2026-05-05 Updated: 2026-05-05 +Archived: 2026-05-05 ## Goal diff --git a/plans/cli-first-ui.md b/plans/cli-first-ui.md index 825acbd..744c5a7 100644 --- a/plans/cli-first-ui.md +++ b/plans/cli-first-ui.md @@ -69,18 +69,6 @@ These ship into `internal/controlplane/` independently. The inspector tolerates - [ ] SSE on `/v1/activity` and `/v1/sessions`. ~3 days each. - [ ] `POST /v1/auth/exchange` for same-origin Clerk → token bridge. ~2 days. -### Phase 4 — Embedded Console (abandoned) - -Tried to ship a Redis-Insight-style embedded terminal on the Inspector page (built and removed 2026-05-02). Strict-CLI-only commands, ghost-text autocomplete, plain-text output. Output was technically correct but the result didn't earn its place — felt like a half-shell rather than the protagonist. Removed in full. - -If we revisit: rather than partial-shell-in-the-browser, lean into the real CLI from a dedicated route (e.g. an inline-rendered `xterm.js` connected to a websocket-backed PTY running on the control plane in a sandbox). That would be a real terminal, not an emulation. Not on the current branch. - -### Phase 5 — Backend contracts (parallel, 2–3 weeks) - -The contracts surfaced by `agent-site/` need to land in `internal/controlplane/`. These are independent of the UI repositioning but make the inspector + console more useful as they ship: - -- Receipts on changelog stream, `/why/:action_id`, ETag/If-Match middleware, X-AFS-Cost middleware, X-AFS-DryRun, SSE on `/v1/activity`, `POST /v1/auth/exchange`. (See earlier plan for details.) 
- ### Phase 6 — Cutover (3–4 days) - [ ] Final demo of new `ui/` vs old `ui/` (worktree) — confirm CLI-first feel @@ -95,6 +83,7 @@ The contracts surfaced by `agent-site/` need to land in `internal/controlplane/` ## Decisions / Blockers +- **Embedded Console attempt — abandoned 2026-05-02.** Tried a Redis-Insight-style embedded terminal on the Inspector page: strict-CLI-only commands, ghost-text autocomplete, plain-text output. Output was technically correct but the result didn't earn its place — felt like a half-shell rather than the protagonist. Removed in full. If we revisit, lean into the real CLI from a dedicated route (e.g. an inline-rendered `xterm.js` connected to a websocket-backed PTY running on the control plane in a sandbox) — a real terminal, not an emulation. Not on the current branch. - **Branch over parallel directory** — decided 2026-05-01. Reasons: avoid duplicated Clerk/router/query plumbing; avoid maintaining 3 UIs during transition; PRs review as refactor diffs; cutover is just `git merge` not a directory rename. Side-by-side need is met by `git worktree add ../afs-main main`. - **Inspector path** — leaning `/inspect` over `/dashboard` or `/`. Verb-y, observability-honest. Decide before Phase 4. - **First-run experience** — leaning toward "your CLI hasn't done anything yet" hero (no auto-provisioned starter workspace) to reinforce CLI-first story on day one. Open question. 
diff --git a/deploy/vercel/auth-plan.md b/plans/cloud-auth.md similarity index 100% rename from deploy/vercel/auth-plan.md rename to plans/cloud-auth.md diff --git a/deploy/vercel/onboarding-flows.md b/plans/cloud-onboarding.md similarity index 95% rename from deploy/vercel/onboarding-flows.md rename to plans/cloud-onboarding.md index 422f64d..d0df082 100644 --- a/deploy/vercel/onboarding-flows.md +++ b/plans/cloud-onboarding.md @@ -21,7 +21,7 @@ Both ramps should converge on the same steady state: As of 2026-04-17, the hosted production deploy now has these pieces in place: -- Vercel preview deploys boot from [main.go](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/main.go) +- Vercel preview deploys boot from [main.go](../deploy/vercel/main.go) - control-plane catalog uses Neon/Postgres in hosted mode - workspace data plane uses Redis Cloud via `REDIS_URL` - first-run hosted bootstrap auto-seeds a managed database profile when the @@ -161,8 +161,7 @@ The next major milestone is: 4. workspace ownership and authorization checks 5. browser-authenticated CLI handoff -See [auth-plan.md](/Users/rowantrollope/git/agent-filesystem/deploy/vercel/auth-plan.md) -for the recommended hosted auth direction. +See [cloud-auth.md](cloud-auth.md) for the recommended hosted auth direction. Important deployment rule: diff --git a/plans/post-review-followups.md b/plans/post-review-followups.md new file mode 100644 index 0000000..2b64992 --- /dev/null +++ b/plans/post-review-followups.md @@ -0,0 +1,237 @@ +# Post-Review Cleanup Follow-ups + +Status: active +Owner: rowan +Created: 2026-05-05 +Updated: 2026-05-05 + +## Goal + +Track the cleanup items identified in the pre-review pass on PR +[#13](https://github.com/redis/agent-filesystem/pull/13) but deliberately +deferred from that PR to keep individual commits scoped and reviewable. +Each item below has a concrete file:line anchor and a recommended +approach so future work can pick up cold. 
+ +## Scope + +Code quality, structural refactors, and test-coverage gaps. Product +roadmap items (cloud, benchmarks, versioned-fs) belong in +[future-work.md](future-work.md) and are out of scope here. + +## What landed in PR #13 + +Listed for context so this plan stays additive. + +- Phase 1 — Doc/config paper cuts (`.dockerignore`, `.gitignore`, + `Makefile`, `example.afs.config.json`, plans archived/moved, + `cli-first-ui.md` dedupe, `repo-walkthrough.md` refresh). +- Phase 2 — Dead code (-480 LOC): `afs-situation-room-kit.tsx`, + `background-pattern.tsx`, `file_versions.go` trampolines, + `parentPath`/`baseName` in afsfs, `Capabilities` type alias, + `examples/codex-settings-migration.md`. +- Phase 3 — UI deps: jszip dynamic import, `lucide-icons.tsx` canonical + import, lint --fix. +- Phase 4 — UI helpers: `foundation/sort-compare.ts`, + `foundation/clipboard-icons.tsx`. +- Phase 5 — Reconciler renames: `sync_reconcile.go` → + `sync_full_reconciler.go`, `sync_reconciler.go` → + `sync_event_reconciler.go`. +- Phase 6 — `.github/workflows/ci.yml` (root, mount, sandbox, ui, ui-lint). +- Phase 7 — Sandbox bind default `127.0.0.1` + threat-model doc + WARNING + log on external bind. +- Phase 8 — FUSE write cache flush bug fix (afsfs handle/file route + through `*AtPath` variants). +- Phase 9 — UI lint baseline cleared (27 → 0); ui-lint job now blocking. +- Phase 10 — Sandbox HTTP timeouts, output buffer cap, process-map GC, + first sandbox tests. +- Phase 11 — Mount client typed error sentinels in + `mount/internal/client/errors.go`; `mapError` now uses `errors.Is` + with substring fallback for externally-formatted messages. +- Phase 12 — Extract MCP helpers to `internal/mcptools/` (text-patch, + args parsing, shared shapes); ~800 LOC of duplication consolidated to + one canonical implementation; aliases left in place transitionally. 
+ +## Checklist + +### Tier A — Quick wins (under 1 hour each) + +- [ ] **Controlplane writeError sentinels.** + `internal/controlplane/http.go:2246-2255` still does + `strings.Contains(strings.ToLower(err.Error()), ...)` for status + mapping. Define sentinels mirroring the mount side + (`ErrAlreadyExists`, `ErrInvalidArgument`, `ErrUnsupported`, + `ErrNotAllowed`, `ErrIsDirectory`, `ErrNotDirectory`), wrap returns + from `service.go` and `database_manager.go`, switch + `writeError` to `errors.Is`. +- [ ] **Mount cache LRU bound.** + `mount/internal/cache/cache.go:25` is an unbounded + `map[string]AttrEntry`. With the 1-hour TTL and per-inode + + per-listing population, a million-file workspace OOMs the mount + daemon. Add an LRU cap (configurable, default ~100k entries) or a + periodic sweep loop. +- [ ] **Drop mcptools aliases + inline rename.** + `cmd/afs/afs_mcp.go` and + `internal/controlplane/mcp_hosted_helpers.go` retain + `type mcpFooBar = mcptools.FooBar` and + `var mcpFooBar = mcptools.FooBar` aliases from the Phase 12 + extraction. Replace ~150 call sites of `mcpRequiredString(...)` + with `mcptools.RequiredString(...)`. Mechanical, rg/sed-friendly. +- [ ] **Pre-existing redis-go deprecations.** + `internal/controlplane/store.go:420,444,629,656`, + `file_versions.go:871,893`, `import_lock.go:77` use + `ZRevRange`, `SetNX`, `ZRangeByScore` which the redis-go client + flags as deprecated. Migrate to `ZRangeArgs` (with `Rev` / + `ByScore` options) and `Set` with the `NX` option. +- [ ] **Per-database error swallowing.** + `internal/controlplane/database_manager.go:2057-2074`, + `:1444`, `:1488`, `:1538` silently `continue` on per-database + Redis errors. Accumulate via `errors.Join` and return; at + minimum log so a Redis blip is visible. 
+- [ ] **Two `panic(err)` calls on configuration paths.** + `internal/controlplane/auth.go:226` `NewNoAuthHandler` panics if + `DefaultAuthConfig()` ever fails to validate; + `internal/controlplane/blob_writer.go:150` `BlobWriter.newPipeline` + panics on an unsupported `redis.Cmdable`. Return errors so callers + can decide. + +### Tier B — Medium refactors (1–3 hours each) + +- [ ] **MCP `mcpDiffOperand*` extraction.** + Currently in both `cmd/afs/afs_mcp.go:1058-1126` and + `internal/controlplane/mcp_hosted.go`. Cannot move to `mcptools` + because the helpers take `controlplane.FileVersionDiffOperand` — + circular dep. Solution: move the operand type to a small shared + package (e.g. `internal/fileversionops/`) or pass the type as a + generic. ~150 lines consolidated. +- [ ] **Redis-value coercion duplicated three places.** + `cmd/afs/workspace_mount_bridge.go:420-448`, + `internal/controlplane/workspace_root_manifest.go:237-254`, + `cmd/afs/afs_search_index.go:118` reimplement + `coerceString`/`coerceInt64`. Extract to + `internal/rediscontent/`. +- [ ] **`use-afs.ts` mutation hook repetition.** + `ui/src/foundation/hooks/use-afs.ts:851-985` defines 13 + `useCreate*`/`useDelete*`/`useUpdate*` hooks that each repeat + the same `useWorkspaceInvalidation` + `useMutation` shape. + A `makeWorkspaceMutation()` factory shrinks this by ~120 lines. +- [ ] **Sandbox HTTP authentication.** + `--bind 127.0.0.1` is the only protection today. For callers + that need to bind externally, add a token check (env or flag- + sourced). Document the threat model addition in the sandbox + package comment that already exists. +- [ ] **Sandbox tests for pre-existing code.** + Phase 10 covered the new buffer + GC. The original + `Launch`/`Read`/`Wait`/`Kill` flow still has zero direct tests — + add lifecycle and timeout cases. +- [ ] **23 `type Foo = foo` aliases in `internal/controlplane`.** + `service.go:378-396` and `database_manager.go:112-114`. 
Either + export the underlying structs in place (and delete the aliases) + or document why the package wants the lowercase-internal / + Uppercase-API split. +- [ ] **Boolean-flag parameter sweep.** + `saveAFSManifest(..., bool)`, + `parseAFSArgs(args, allowForce, allowReadonly bool)`, + `cmdImportSelfHosted(..., replaceExisting bool, ..., mountAtSource bool)`, + `newReconciler(...)` takes 14 positional args. Replace with options + structs to make call sites readable. + +### Tier C — Big surgical refactors (half-day+ each) + +- [ ] **`Resolved` / scoped collapse in + `internal/controlplane/database_manager.go` + `http.go`.** + ~30 method pairs (`SaveCheckpoint` / + `SaveResolvedCheckpoint`, `RestoreCheckpoint` / + `RestoreResolvedCheckpoint`, etc.) differ only in whether they + call `resolveScopedWorkspace` or `resolveWorkspace`. ~1500 + lines of avoidable duplication. Plan: introduce a `route` + struct and one `resolve(ctx, databaseID, workspace) (*Service, + route, error)` helper that handles both cases via a default + empty `databaseID`. Independently: replace the 40-branch + `switch strings.HasSuffix(...)` URL dispatch in + `handleWorkspaceRoute` / `handleResolvedWorkspaceRoute` with a + real router (chi, gorilla, or `http.ServeMux` with `{workspace}` + placeholders if Go 1.22+). +- [ ] **File splits for the 2000+ LOC files.** + Each is a careful 1–2 hour exercise on its own. + - `ui/src/foundation/api/afs.ts` (3641 lines): split + `client.ts`, `http.ts`, `http-types.ts`, `mappers.ts`, + `demo/`. Hand-rolled and well-typed already; mostly + mechanical. + - `internal/controlplane/database_manager.go` (2916 lines): + split into `_registry.go`, `_workspace.go`, `_aggregate.go`. + After the `Resolved`-collapse the file shrinks first. + - `cmd/afs/afs_mcp.go` (2490 after Phase 12): split per tool + family (file-read, file-write, grep, workspace, checkpoint). 
+ - `internal/controlplane/service.go` (2843 lines): split per + concern (`_workspace.go`, `_checkpoint.go`, `_session.go`, + `_tree.go`, `_helpers.go`). + - `internal/controlplane/http.go` (2271 lines): split into + `http_admin.go`, `http_client.go`, `http_workspace.go`, + `http_resolved.go`, `http_helpers.go`. Kill the 627-line + `newAdminMux` with route helpers. + - `cmd/afs/workspace_checkpoint_commands.go` (2105 lines). + - `internal/controlplane/mcp_hosted.go` (still 1753 after + Phase 12). +- [ ] **Big test-file unit / integration split.** + `internal/controlplane/http_test.go` (2507 lines), + `internal/controlplane/file_versions_test.go` (1944 lines), + `cmd/afs/sync_integration_test.go` (1887 lines). Tag the + integration-side with `//go:build integration` so plain + `go test ./...` stays fast. +- [ ] **Migrate mount tests off real `redis-server` to `miniredis`.** + `mount/internal/client/native_test.go` forks a real + `redis-server` per test, 46x in parallel. CI installs the + binary today (Phase 6) but the cost is real on smaller + runners. Use `miniredis` for the bulk; keep one or two + against a real server for behaviors miniredis can't model. +- [ ] **`mount/client/` shim duplication.** + Every new exported type in `mount/internal/client/` has to be + re-aliased in `mount/client/` because of the nested-module + boundary. Either auto-generate the shim from a list of + symbols, or merge the modules. +- [ ] **Drop the vestigial `native_*` prefix in mount client.** + `mount/internal/client/native_core.go`, `native_helpers.go`, + `native_text.go`, `native_walk.go`, `native_range.go`, + `native_locks.go`. There's only one client implementation; + the prefix carries no information. Rename and split + `native_helpers.go` (1418 lines) into `paths.go`, `search.go`, + `inode.go`. 
+- [ ] **Drop the `afs_*` prefix in `cmd/afs/`.** + `afs_commands.go`, `afs_types.go`, `afs_local.go`, + `afs_diff.go`, `afs_grep.go`, `afs_hash.go`, `afs_mcp.go` — + everything in `cmd/afs/` is already "afs". Drop. +- [ ] **`MountedFS.bash()` syncs the entire workspace per call.** + `sdk/python/src/redis_afs/client.py:374`, + `sdk/typescript/src/index.ts:538`. For a workspace with 10k + files that's hundreds of MCP round-trips per shell call. + Document loudly as O(workspace), or replace with a real + mount on the SDK side. + +## Decisions / Blockers + +- **`format-toggle.tsx` + `lib/snippets.ts` are explicitly held back + from any cleanup pass.** They are listed as Phase 1 deliverables of + the active [cli-first-ui.md](cli-first-ui.md) plan (harvested from + the now-removed `agent-site/`). If `cli-first-ui.md` is paused or + superseded, add a checkbox here to delete them as confirmed dead + code. +- **Sandbox tests for the existing `Launch`/`Read`/`Wait`/`Kill` + flow** are listed in Tier B, but the sandbox itself is the rare Go + surface that is also a security boundary. Tests should include + hostile-input cases, not just happy paths — budget for that when + scheduling. + +## Verification + +Each item should: +- Land as its own commit (or atomic series for the big surgical ones). +- Run `make test` plus the targeted module's tests after the change. +- For UI items: run `cd ui && npm run build && npm run test && + npm run lint` (lint is now blocking in CI). +- For mount/sandbox items: run `cd mount && go test ./...` / + `cd sandbox && go test ./...` respectively. + +## Result + +Fill in before archiving. diff --git a/sandbox/cmd/sandbox/main.go b/sandbox/cmd/sandbox/main.go index 5d389a2..4244943 100644 --- a/sandbox/cmd/sandbox/main.go +++ b/sandbox/cmd/sandbox/main.go @@ -1,4 +1,28 @@ // Command sandbox runs the Agent Filesystem sandbox server. 
+// +// SECURITY / THREAT MODEL +// +// The sandbox HTTP API runs arbitrary shell commands sent in the request +// body (POST /processes -> sh -c <command>). It has no authentication, no +// TLS, and no rate limiting. Any client that can reach the listen address +// can execute code as the sandbox process user, in the workspace directory. +// +// This is intentional: the sandbox is meant to be reached only by tightly +// trusted callers on the same host (e.g. an MCP host running as the same +// user, or a wrapper that gates access). It is NOT a public-facing +// service. +// +// To preserve that assumption the default bind address is 127.0.0.1. +// Binding to 0.0.0.0 or any externally reachable interface requires the +// caller to pass --bind explicitly and accept the consequences. +// +// If you need the sandbox to be reachable across hosts: +// - terminate auth at a proxy that fronts it, AND +// - bind the sandbox itself to 127.0.0.1 (or a unix-only interface), +// so the proxy is the only path in. +// +// MCP transport (--transport stdio) does not open a network socket and is +// not affected by these binding rules. package main import ( @@ -10,12 +34,24 @@ import ( "os" "os/signal" "syscall" + "time" "github.com/redis/agent-filesystem/sandbox/internal/api" "github.com/redis/agent-filesystem/sandbox/internal/executor" ) +// gcInterval is how often the process-map sweeper runs. +// gcMaxAge is how long terminal-state processes (exited / killed / timed-out) +// are retained before they are pruned. Long enough that a CLI client can +// reasonably read the result back, short enough that a long-lived sandbox +// doesn't accumulate forever. +const ( + gcInterval = 5 * time.Minute + gcMaxAge = 1 * time.Hour +) + func main() { + bind := flag.String("bind", "127.0.0.1", "HTTP server bind address. 
Default 127.0.0.1; set to 0.0.0.0 to expose externally (see security note in source).") port := flag.Int("port", 8090, "HTTP server port") workspace := flag.String("workspace", "/workspace", "Workspace directory") transport := flag.String("transport", "http", "Transport: http or stdio (MCP)") @@ -35,23 +71,38 @@ func main() { // HTTP server server := api.NewServer(manager) - addr := fmt.Sprintf(":%d", *port) + addr := fmt.Sprintf("%s:%d", *bind, *port) httpServer := &http.Server{ Addr: addr, Handler: server.Handler(), + // Bound the request side; the wait endpoints stream + // long-running process completion, so WriteTimeout is + // intentionally not set. + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 120 * time.Second, } + // Periodic process-map GC so terminal-state entries don't accumulate. + gcCtx, cancelGC := context.WithCancel(context.Background()) + defer cancelGC() + go runProcessGC(gcCtx, manager) + // Graceful shutdown go func() { sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) <-sigCh log.Println("Shutting down...") + cancelGC() httpServer.Shutdown(context.Background()) }() log.Printf("Sandbox server listening on %s", addr) + if *bind != "127.0.0.1" && *bind != "localhost" { + log.Printf("WARNING: bind=%s exposes shell-execution endpoints beyond localhost. 
There is no auth on this API.", *bind) + } log.Printf("Workspace: %s", *workspace) log.Printf("Endpoints:") log.Printf(" POST /processes - Launch process") @@ -65,3 +116,18 @@ func main() { log.Fatalf("Server error: %v", err) } } + +func runProcessGC(ctx context.Context, m *executor.Manager) { + tick := time.NewTicker(gcInterval) + defer tick.Stop() + for { + select { + case <-ctx.Done(): + return + case <-tick.C: + if removed := m.GC(gcMaxAge); removed > 0 { + log.Printf("sandbox: pruned %d terminal-state process(es)", removed) + } + } + } +} diff --git a/sandbox/internal/executor/buffer.go b/sandbox/internal/executor/buffer.go new file mode 100644 index 0000000..6789e51 --- /dev/null +++ b/sandbox/internal/executor/buffer.go @@ -0,0 +1,50 @@ +package executor + +import "sync" + +// boundedBuffer is a thread-safe byte buffer with a hard cap on retained +// bytes. Writes past the cap are silently dropped — the buffer keeps the +// first `cap` bytes and a "truncated" flag so callers can warn the user +// rather than letting a chatty process OOM the sandbox. +type boundedBuffer struct { + mu sync.Mutex + buf []byte + cap int + truncated bool +} + +func newBoundedBuffer(cap int) *boundedBuffer { + return &boundedBuffer{buf: make([]byte, 0, 4096), cap: cap} +} + +// Write appends p to the buffer up to the cap; bytes beyond the cap are +// dropped and Truncated() will report true. Always returns len(p) and nil +// error to keep io.Writer semantics intact for stdlib consumers. +func (b *boundedBuffer) Write(p []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + remaining := b.cap - len(b.buf) + if remaining <= 0 { + b.truncated = true + return len(p), nil + } + if len(p) > remaining { + b.buf = append(b.buf, p[:remaining]...) + b.truncated = true + return len(p), nil + } + b.buf = append(b.buf, p...) + return len(p), nil +} + +// String returns the current buffer content. 
If the buffer hit the cap, a +// trailing marker is appended so consumers can see that more output was +// dropped. +func (b *boundedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + if b.truncated { + return string(b.buf) + "\n[output truncated: sandbox buffer cap reached]\n" + } + return string(b.buf) +} diff --git a/sandbox/internal/executor/buffer_test.go b/sandbox/internal/executor/buffer_test.go new file mode 100644 index 0000000..28e41e6 --- /dev/null +++ b/sandbox/internal/executor/buffer_test.go @@ -0,0 +1,61 @@ +package executor + +import ( + "strings" + "testing" +) + +func TestBoundedBufferUnderCap(t *testing.T) { + b := newBoundedBuffer(100) + if _, err := b.Write([]byte("hello")); err != nil { + t.Fatalf("Write returned error: %v", err) + } + if got := b.String(); got != "hello" { + t.Fatalf("String() = %q, want %q", got, "hello") + } +} + +func TestBoundedBufferAtCap(t *testing.T) { + b := newBoundedBuffer(5) + if _, err := b.Write([]byte("hello")); err != nil { + t.Fatalf("Write returned error: %v", err) + } + if got := b.String(); got != "hello" { + t.Fatalf("String() = %q, want %q", got, "hello") + } +} + +func TestBoundedBufferTruncates(t *testing.T) { + b := newBoundedBuffer(5) + n, err := b.Write([]byte("hello world")) + if err != nil { + t.Fatalf("Write returned error: %v", err) + } + // boundedBuffer reports the full length so callers don't see short writes. 
+ if n != len("hello world") { + t.Fatalf("Write returned n=%d, want %d", n, len("hello world")) + } + got := b.String() + if !strings.HasPrefix(got, "hello") { + t.Fatalf("String() = %q, expected hello prefix", got) + } + if !strings.Contains(got, "truncated") { + t.Fatalf("String() = %q, expected truncation marker", got) + } +} + +func TestBoundedBufferContinuesDroppingAfterCap(t *testing.T) { + b := newBoundedBuffer(3) + b.Write([]byte("abc")) + b.Write([]byte("defghi")) + got := b.String() + if !strings.HasPrefix(got, "abc") { + t.Fatalf("String() = %q, expected abc prefix", got) + } + if strings.Contains(got, "def") { + t.Fatalf("String() = %q, second write should have been dropped", got) + } + if !strings.Contains(got, "truncated") { + t.Fatalf("String() = %q, expected truncation marker", got) + } +} diff --git a/sandbox/internal/executor/manager_test.go b/sandbox/internal/executor/manager_test.go new file mode 100644 index 0000000..5892580 --- /dev/null +++ b/sandbox/internal/executor/manager_test.go @@ -0,0 +1,78 @@ +package executor + +import ( + "context" + "testing" + "time" +) + +// quickProcess launches `true` and waits for it to exit, returning the +// process ID after the entry is in StateExited. Useful for setting up GC +// scenarios without coupling to internal state directly. +func quickProcess(t *testing.T, m *Manager) string { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + res, err := m.Launch(ctx, LaunchOptions{ + Command: "true", + Wait: true, + }) + if err != nil { + t.Fatalf("Launch: %v", err) + } + if res.State != StateExited { + t.Fatalf("expected StateExited, got %s", res.State) + } + return res.ID +} + +func TestGCSkipsRunningProcesses(t *testing.T) { + m := NewManager(t.TempDir()) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Launch a long-running sleep without waiting. 
+ res, err := m.Launch(ctx, LaunchOptions{Command: "sleep 30"}) + if err != nil { + t.Fatalf("Launch: %v", err) + } + defer m.Kill(res.ID) + + // Even with maxAge=0, running processes must not be pruned. + if removed := m.GC(0); removed != 0 { + t.Fatalf("GC pruned %d running processes; want 0", removed) + } + if list := m.List(); len(list) != 1 { + t.Fatalf("expected 1 process after GC, got %d", len(list)) + } +} + +func TestGCPrunesOldExitedProcesses(t *testing.T) { + m := NewManager(t.TempDir()) + id := quickProcess(t, m) + + // Process is in StateExited but EndedAt is recent. Calling GC with a + // large maxAge must not prune it. + if removed := m.GC(time.Hour); removed != 0 { + t.Fatalf("GC with large maxAge pruned %d entries; want 0", removed) + } + if list := m.List(); len(list) != 1 { + t.Fatalf("expected 1 process before GC, got %d", len(list)) + } + + // Backdate the EndedAt so the GC sees the entry as old. + m.mu.Lock() + proc := m.processes[id] + m.mu.Unlock() + proc.mu.Lock() + old := time.Now().Add(-2 * time.Hour) + proc.EndedAt = &old + proc.mu.Unlock() + + if removed := m.GC(time.Hour); removed != 1 { + t.Fatalf("GC removed %d entries; want 1", removed) + } + if list := m.List(); len(list) != 0 { + t.Fatalf("expected 0 processes after GC, got %d", len(list)) + } +} diff --git a/sandbox/internal/executor/monitor.go b/sandbox/internal/executor/monitor.go index a5aa0a4..d7660e8 100644 --- a/sandbox/internal/executor/monitor.go +++ b/sandbox/internal/executor/monitor.go @@ -181,3 +181,27 @@ func (m *Manager) Wait(ctx context.Context, id string) (*ReadResult, error) { return m.Read(id) } + +// GC prunes terminal-state processes whose EndedAt timestamp is older than +// maxAge. Without this, the process map grows without bound for the +// lifetime of the sandbox. Returns the number of entries removed. 
+func (m *Manager) GC(maxAge time.Duration) int { + cutoff := time.Now().Add(-maxAge) + m.mu.Lock() + defer m.mu.Unlock() + removed := 0 + for id, proc := range m.processes { + proc.mu.RLock() + state := proc.State + ended := proc.EndedAt + proc.mu.RUnlock() + if state == StateRunning { + continue + } + if ended != nil && ended.Before(cutoff) { + delete(m.processes, id) + removed++ + } + } + return removed +} diff --git a/sandbox/internal/executor/process.go b/sandbox/internal/executor/process.go index 1d96b81..851bd89 100644 --- a/sandbox/internal/executor/process.go +++ b/sandbox/internal/executor/process.go @@ -2,11 +2,11 @@ package executor import ( - "bytes" "context" "fmt" "io" "os/exec" + "strings" "sync" "syscall" "time" @@ -14,6 +14,11 @@ import ( "github.com/google/uuid" ) +// stdoutBufferCap bounds the in-memory output retained per process. A +// chatty process at high throughput would otherwise OOM the sandbox; bytes +// past this cap are dropped and a marker is added to the output. +const stdoutBufferCap = 1 << 20 // 1 MiB + // ProcessState represents the current state of a process. 
type ProcessState string @@ -36,8 +41,8 @@ type Process struct { PID int `json:"pid,omitempty"` cmd *exec.Cmd - stdout *bytes.Buffer - stderr *bytes.Buffer + stdout *boundedBuffer + stderr *boundedBuffer stdin io.WriteCloser mu sync.RWMutex done chan struct{} @@ -84,7 +89,7 @@ func (m *Manager) Launch(ctx context.Context, opts LaunchOptions) (*LaunchResult cwd := opts.Cwd if cwd == "" { cwd = m.workspace - } else if cwd[0] != '/' { + } else if !strings.HasPrefix(cwd, "/") { cwd = m.workspace + "/" + cwd } @@ -92,8 +97,8 @@ func (m *Manager) Launch(ctx context.Context, opts LaunchOptions) (*LaunchResult cmd.Dir = cwd cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} - stdout := &bytes.Buffer{} - stderr := &bytes.Buffer{} + stdout := newBoundedBuffer(stdoutBufferCap) + stderr := newBoundedBuffer(stdoutBufferCap) cmd.Stdout = stdout cmd.Stderr = stderr diff --git a/ui/package-lock.json b/ui/package-lock.json index 960e2b7..4e0e992 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -2571,9 +2571,9 @@ }, "node_modules/@redislabsdev/redis-ui-components": { "name": "@redis-ui/components", - "version": "44.0.2", - "resolved": "https://registry.npmjs.org/@redis-ui/components/-/components-44.0.2.tgz", - "integrity": "sha512-ldQzUV14452iVOuOxjnN3U9YbO6xm9UOmKpAHyJ8NZs53WAo4oCjGk4CImkf8cNh8YuDrVVg7RrZKbqFZqmSdg==", + "version": "44.1.0", + "resolved": "https://registry.npmjs.org/@redis-ui/components/-/components-44.1.0.tgz", + "integrity": "sha512-GFmN6yYxCItAmnnf/XUwLd1krCPU8B8ki3T79oQRW9k/8Kv8BKb0C7ZndINHuO0GolFN1wVyhKb3TNf4IihMlg==", "license": "UNLICENSED", "dependencies": { "@radix-ui/react-checkbox": "^1.0.3", @@ -2602,29 +2602,29 @@ "peerDependencies": { "@redis-ui/icons": "^6.9.2", "@redis-ui/styles": "^15.0.0", - "react": "^17.0.0 || ^18.0.0", - "react-dom": "^17.0.0 || ^18.0.0", + "react": "^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0", "styled-components": "^5.0.0" } }, "node_modules/@redislabsdev/redis-ui-icons": { 
"name": "@redis-ui/icons", - "version": "6.9.3", - "resolved": "https://registry.npmjs.org/@redis-ui/icons/-/icons-6.9.3.tgz", - "integrity": "sha512-CHWyqMn7ygGdIshgdgGLEqBd8X2g3XFBQHGhUXuMPgeq1nsYa9jiNbQfx9Hk65kv+AICr+ZfqM12irgAxpyenw==", + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/@redis-ui/icons/-/icons-6.11.0.tgz", + "integrity": "sha512-Myox6U7AGZlyvWoNJq2kZ5frP4g2YiMXpZCtNPSw2cwo17NxEowm8Sswtnp5pdcfh3by/0Z98+zxSdrdxl2G8w==", "license": "UNLICENSED", "peerDependencies": { "@radix-ui/react-id": "^1.1.0", "@redis-ui/styles": "^15.0.0", - "react": "^17.0.0 || ^18.0.0", - "react-dom": "^17.0.0 || ^18.0.0" + "react": "^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0" } }, "node_modules/@redislabsdev/redis-ui-styles": { "name": "@redis-ui/styles", - "version": "15.0.0", - "resolved": "https://registry.npmjs.org/@redis-ui/styles/-/styles-15.0.0.tgz", - "integrity": "sha512-Wic8KSOBHk0Idlnxbz4tDHiAW/Kw5ob7FOZdiAn8WReaGR8FKck195x1Pt4ZsyRCJ6gMyRUTW6o/PbhORGth9Q==", + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/@redis-ui/styles/-/styles-15.1.0.tgz", + "integrity": "sha512-IKAu6D4WR5uspJabI1U80s8YwM25bscs8HuUhK/hCDUCqLvrTeyDbHNq84mwwXcbdQgW1JnVDDeD/eYVNAy0XQ==", "license": "UNLICENSED", "dependencies": { "color-alpha": "^2.0.0", @@ -2632,16 +2632,16 @@ }, "peerDependencies": { "modern-normalize": "^3.0.1", - "react": "^17.0.0 || ^18.0.0", - "react-dom": "^17.0.0 || ^18.0.0", + "react": "^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0", "styled-components": "^5.0.0" } }, "node_modules/@redislabsdev/redis-ui-table": { "name": "@redis-ui/table", - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/@redis-ui/table/-/table-3.7.0.tgz", - "integrity": "sha512-uLeMgecqwMwdmk7sIxMHlfKPi4ZAivtfW9t2M3CPvlftQM7IY9ur2fwRgF4V8Fl8PrePyQ5V9/38GSSz0CPscg==", + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/@redis-ui/table/-/table-3.8.0.tgz", + "integrity": 
"sha512-qW4vGjX4lkCuxWD3YpLJB8hLfIVUQkYPStUtQOKexvfCcvqfuA8SM3wf0haZlNxO8vuBiBcT7UA9B9Icj6l78g==", "license": "UNLICENSED", "dependencies": { "@redis-ui/components": "^44.0.0", @@ -2651,8 +2651,8 @@ }, "peerDependencies": { "@redis-ui/styles": "^15.0.0", - "react": "^17.0.0 || ^18.0.0", - "react-dom": "^17.0.0 || ^18.0.0", + "react": "^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0", "styled-components": "^5.0.0" } }, diff --git a/ui/src/components/afs-situation-room-kit.tsx b/ui/src/components/afs-situation-room-kit.tsx deleted file mode 100644 index f677f59..0000000 --- a/ui/src/components/afs-situation-room-kit.tsx +++ /dev/null @@ -1,297 +0,0 @@ -/** - * AFS · Situation Room primitives - * - * Skin-native components inspired by the Situation Room HTML reference - * (Agent Filesystem Coder Edition/styles.css). They render usefully under - * the classic skin too, but their visual identity belongs to situation-room. - * - * Composition shapes: - * - * - * - * ⌘K - * - */ -import type { ReactNode } from "react"; -import styled from "styled-components"; - -/* ── Panel + PanelHead ─────────────────────────────────────────── */ - -export const Panel = styled.div` - border: 1px solid var(--afs-line-strong, var(--afs-line)); - border-radius: 4px; - background: var(--afs-bg-1, var(--afs-panel-strong)); - position: relative; - overflow: hidden; - - [data-skin="situation-room"] && { - border-radius: var(--afs-r-2); - } -`; - -const PanelHeadRow = styled.div` - display: grid; - grid-template-columns: auto 1fr auto; - align-items: center; - gap: 12px; - padding: 8px 12px; - border-bottom: 1px solid var(--afs-line-strong, var(--afs-line)); - background: var(--afs-bg-2, var(--afs-panel)); - font-family: var(--afs-font-mono, var(--afs-mono)); - font-size: var(--afs-fz-xs, 11px); - letter-spacing: 0.1em; - color: var(--afs-ink-dim, var(--afs-muted)); - text-transform: uppercase; -`; - -const Dots = styled.div` - display: flex; - gap: 6px; -`; - 
-const Dot = styled.span<{ $tone?: "g" | "a" | "r" | "muted" }>` - width: 8px; - height: 8px; - border-radius: 50%; - background: ${({ $tone = "muted" }) => - $tone === "g" - ? "var(--afs-ok, #DCFF1E)" - : $tone === "a" - ? "var(--afs-warn, #FFB547)" - : $tone === "r" - ? "var(--afs-err, #FF4D4D)" - : "var(--afs-line-strong, #284A5E)"}; -`; - -const PanelHeadTitle = styled.div` - color: var(--afs-ink); - font-weight: 500; - text-align: center; - letter-spacing: 0.05em; -`; - -const PanelHeadMeta = styled.div` - text-align: right; - color: var(--afs-ink-dim, var(--afs-muted)); -`; - -export function PanelHead(props: { title?: string; meta?: ReactNode; dots?: boolean }) { - return ( - - {props.dots !== false ? ( - - ) : ( - - )} - {props.title ?? ""} - {props.meta ?? ""} - - ); -} - -export const PanelBody = styled.div` - padding: 16px; - font-family: var(--afs-font-mono, var(--afs-mono)); - font-size: var(--afs-fz-sm, 12px); - color: var(--afs-ink); -`; - -/* ── Terminal block ────────────────────────────────────────────── */ - -export const Term = styled.div` - padding: 16px 18px; - background: var(--afs-bg, #091a23); - color: var(--afs-ink); - font-family: var(--afs-font-mono, var(--afs-mono)); - font-size: var(--afs-fz-sm, 12px); - line-height: 1.65; -`; - -export const TermLine = styled.div` - white-space: pre-wrap; -`; - -export const TermPrompt = styled.span` - color: var(--afs-accent, #DCFF1E); - user-select: none; - margin-right: 8px; -`; - -export const TermOut = styled.span` - color: var(--afs-ink-dim, var(--afs-muted)); -`; - -export const TermOk = styled.span` - color: var(--afs-ok, #DCFF1E); -`; - -export const TermWarn = styled.span` - color: var(--afs-warn, #FFB547); -`; - -export const TermErr = styled.span` - color: var(--afs-err, #FF4D4D); -`; - -export const TermCmt = styled.span` - color: var(--afs-ink-faint, #4A4C48); -`; - -export const TermCursor = styled.span` - display: inline-block; - width: 8px; - height: 1em; - background: 
var(--afs-accent, #DCFF1E); - vertical-align: -2px; - margin-left: 2px; - animation: afs-blink 1s steps(2, end) infinite; -`; - -/* ── Section header (eyebrow + title + meta) ───────────────────── */ - -const SectionHeadRow = styled.div` - display: grid; - grid-template-columns: auto 1fr auto; - align-items: end; - gap: 24px; - padding: 18px 0 14px; - border-bottom: 1px solid var(--afs-line); - margin-bottom: 24px; - - @media (max-width: 720px) { - grid-template-columns: auto 1fr; - } -`; - -const SectionHeadNum = styled.div` - color: var(--afs-accent); - font-family: var(--afs-font-mono, var(--afs-mono)); - font-size: var(--afs-fz-xs, 11px); - letter-spacing: 0.2em; -`; - -const SectionHeadTitle = styled.div` - color: var(--afs-ink); - font-family: var(--afs-font-mono, var(--afs-mono)); - font-size: var(--afs-fz-lg, 15px); - font-weight: 500; - letter-spacing: 0.04em; -`; - -const SectionHeadMeta = styled.div` - color: var(--afs-ink-dim, var(--afs-muted)); - font-family: var(--afs-font-mono, var(--afs-mono)); - font-size: var(--afs-fz-xs, 11px); - letter-spacing: 0.1em; - text-align: right; - - @media (max-width: 720px) { - grid-column: 1 / -1; - text-align: left; - } -`; - -export function SectionHead(props: { num?: string; title: string; meta?: ReactNode }) { - return ( - - {props.num ?? ""} - {props.title} - {props.meta ? 
{props.meta} : } - - ); -} - -/* ── KbdKey ────────────────────────────────────────────────────── */ - -export const KbdKey = styled.kbd` - display: inline-block; - padding: 2px 6px; - font-family: var(--afs-font-mono, var(--afs-mono)); - font-size: 10px; - border: 1px solid var(--afs-line-strong, var(--afs-line)); - color: var(--afs-ink-dim, var(--afs-muted)); - background: var(--afs-bg-1, var(--afs-panel)); - border-radius: 2px; - letter-spacing: 0.04em; -`; - -/* ── Spark (animated mini bar chart) ───────────────────────────── */ - -const SparkRow = styled.span` - display: inline-flex; - align-items: end; - gap: 2px; - height: 14px; -`; - -const SparkBar = styled.span<{ $delay: number }>` - display: inline-block; - width: 3px; - background: var(--afs-accent, #DCFF1E); - height: 60%; - animation: afs-spark 1.6s ease-in-out infinite; - animation-delay: ${({ $delay }) => `${$delay}ms`}; -`; - -export function Spark(props: { bars?: number }) { - const count = Math.max(2, Math.min(props.bars ?? 5, 12)); - return ( - - ); -} - -/* ── Eyebrow (small uppercase accent label) ────────────────────── */ - -export const Eyebrow = styled.span` - font-family: var(--afs-font-mono, var(--afs-mono)); - font-size: var(--afs-fz-xs, 11px); - color: var(--afs-accent); - letter-spacing: 0.2em; - text-transform: uppercase; -`; - -/* ── Brackets (decorative around text) ─────────────────────────── */ - -export function Brackets(props: { children: ReactNode }) { - return ( - - [ - {props.children} - ] - - ); -} - -const BracketSpan = styled.span` - color: var(--afs-accent); - font-family: var(--afs-font-mono, var(--afs-mono)); - margin: 0 4px; -`; - -/* ── LED dot (status indicator) ────────────────────────────────── */ - -export const Led = styled.span<{ $tone?: "ok" | "warn" | "err" | "info" }>` - display: inline-block; - width: 6px; - height: 6px; - border-radius: 50%; - background: ${({ $tone = "ok" }) => - $tone === "warn" - ? "var(--afs-warn)" - : $tone === "err" - ? 
"var(--afs-err)" - : $tone === "info" - ? "var(--afs-info)" - : "var(--afs-accent)"}; - box-shadow: 0 0 6px currentColor; - animation: afs-blink var(--afs-dur-tick, 1400ms) steps(2, end) infinite; - flex-shrink: 0; -`; diff --git a/ui/src/components/getting-started-onboarding-dialog.tsx b/ui/src/components/getting-started-onboarding-dialog.tsx index 25ef492..5c2c8fd 100644 --- a/ui/src/components/getting-started-onboarding-dialog.tsx +++ b/ui/src/components/getting-started-onboarding-dialog.tsx @@ -43,7 +43,7 @@ export function GettingStartedOnboardingDialog({ } const workspaceLabel = displayWorkspaceName(workspaceName); - const agentConnected = (agentsQuery.data ?? []).some( + const agentConnected = agentsQuery.data.some( (agent) => agent.workspaceId === workspaceId, ); diff --git a/ui/src/components/global-drawer.tsx b/ui/src/components/global-drawer.tsx index 770a74a..20346e7 100644 --- a/ui/src/components/global-drawer.tsx +++ b/ui/src/components/global-drawer.tsx @@ -22,7 +22,7 @@ export function GlobalDrawer() { const { state, close } = useDrawer(); const quickstartMutation = useQuickstartMutation(); const workspacesQuery = useScopedWorkspaceSummaries(); - const workspaces = workspacesQuery.data ?? []; + const workspaces = workspacesQuery.data; const haveAnyWorkspace = workspaces.length > 0; if (!state) return null; @@ -49,7 +49,7 @@ export function GlobalDrawer() { : "idle"; const errorMessage = quickstartMutation.isError - ? quickstartMutation.error.message?.includes("cannot connect") + ? quickstartMutation.error.message.includes("cannot connect") ? "Could not connect to Redis at localhost:6379. Start Redis or add a remote database, then retry." : quickstartMutation.error.message || "Something went wrong." 
: null; diff --git a/ui/src/components/lucide-icons.tsx b/ui/src/components/lucide-icons.tsx index fda5356..35d41ff 100644 --- a/ui/src/components/lucide-icons.tsx +++ b/ui/src/components/lucide-icons.tsx @@ -1,6 +1,6 @@ import type { ComponentType, SVGProps } from "react"; import type { MonochromeIconProps } from "@redis-ui/icons"; -import { useTheme } from "@redislabsdev/redis-ui-styles"; +import { useTheme } from "@redis-ui/styles"; import { Bell, BookOpen, @@ -35,12 +35,12 @@ function makeLucideIcon(Icon: LucideIcon, defaultLabel: string) { const theme = useTheme(); const sizeValue = customSize || - theme?.core.icon.size[size] || - theme?.core.icon.size.L || + theme.core.icon.size[size] || + theme.core.icon.size.L || 20; const colorValue = customColor || - (color && theme?.semantic.color.icon[color]) || + (color && theme.semantic.color.icon[color]) || "currentColor"; return ( diff --git a/ui/src/features/agent-experience/SiteModeFrame.tsx b/ui/src/features/agent-experience/SiteModeFrame.tsx index cc485ef..e9bdea4 100644 --- a/ui/src/features/agent-experience/SiteModeFrame.tsx +++ b/ui/src/features/agent-experience/SiteModeFrame.tsx @@ -2,7 +2,8 @@ import type { ReactNode } from "react"; import styled from "styled-components"; import { useStoredViewMode } from "../../foundation/hooks/use-stored-view-mode"; import { SiteAgentPane } from "./PublicAgentPane"; -import { SiteModeContext, type SiteMode } from "./site-mode-context"; +import { SiteModeContext } from "./site-mode-context"; +import type { SiteMode } from "./site-mode-context"; import { SiteModeSwitch } from "./SiteModeSwitch"; export function SiteModeFrame({ children }: { children: ReactNode }) { diff --git a/ui/src/features/agent-experience/public-agent-documents.ts b/ui/src/features/agent-experience/public-agent-documents.ts index 2ce1c28..fb2a4e5 100644 --- a/ui/src/features/agent-experience/public-agent-documents.ts +++ b/ui/src/features/agent-experience/public-agent-documents.ts @@ -1,4 +1,5 @@ 
-import { docsTopicById, docsTopics, type DocsTopic, type DocsTopicId } from "../docs/docs-topics"; +import { docsTopicById, docsTopics } from "../docs/docs-topics"; +import type { DocsTopic, DocsTopicId } from "../docs/docs-topics"; import { bottomNavigationItems, navigationItems, resolveNavigationTitleParts } from "../../layout/navigation-items"; import { publicNavItems, publicRepoLink } from "../../layout/public-navigation"; import { canonicalWorkspaceName, displayWorkspaceName } from "../../foundation/workspace-display"; diff --git a/ui/src/features/agents/CreateMCPAccessDialog.tsx b/ui/src/features/agents/CreateMCPAccessDialog.tsx index 2fc5d75..ed8b108 100644 --- a/ui/src/features/agents/CreateMCPAccessDialog.tsx +++ b/ui/src/features/agents/CreateMCPAccessDialog.tsx @@ -1,5 +1,6 @@ import { Button, Select } from "@redis-ui/components"; -import { useEffect, useMemo, useState, type FormEvent } from "react"; +import { useEffect, useMemo, useState } from "react"; +import type { FormEvent } from "react"; import styled from "styled-components"; import { DialogActions, @@ -259,7 +260,7 @@ export function CreateMCPAccessDialog({ })) } value={workspaceKey} - onChange={(next) => setWorkspaceKey(next as string)} + onChange={(next) => setWorkspaceKey(next)} disabled={options.length === 0} /> @@ -303,7 +304,7 @@ export function CreateMCPAccessDialog({ { value: "never", label: "No expiry" }, ]} value={expiry} - onChange={(next) => setExpiry(next as string)} + onChange={(next) => setExpiry(next)} /> diff --git a/ui/src/features/agents/LocalMCPAccessDialog.tsx b/ui/src/features/agents/LocalMCPAccessDialog.tsx index 31965b2..dbecadb 100644 --- a/ui/src/features/agents/LocalMCPAccessDialog.tsx +++ b/ui/src/features/agents/LocalMCPAccessDialog.tsx @@ -98,7 +98,7 @@ export function LocalMCPAccessDialog({ })) } value={workspaceKey} - onChange={(next) => setWorkspaceKey(next as string)} + onChange={(next) => setWorkspaceKey(next)} disabled={options.length === 0} /> diff --git 
a/ui/src/features/templates/TemplateInstallDetail.tsx b/ui/src/features/templates/TemplateInstallDetail.tsx index 5210061..d6496c1 100644 --- a/ui/src/features/templates/TemplateInstallDetail.tsx +++ b/ui/src/features/templates/TemplateInstallDetail.tsx @@ -1,5 +1,4 @@ import { Button } from "@redis-ui/components"; -import JSZip from "jszip"; import { useState } from "react"; import styled from "styled-components"; import { SurfaceCard } from "../../components/card-shell"; @@ -64,6 +63,7 @@ export async function downloadClaudePlugin(args: { token: string; }): Promise { const files = buildClaudePlugin(args); + const { default: JSZip } = await import("jszip"); const zip = new JSZip(); for (const file of files) { const isExecutable = file.path.endsWith(".sh"); diff --git a/ui/src/features/workspaces/CreateWorkspaceDialog.tsx b/ui/src/features/workspaces/CreateWorkspaceDialog.tsx index f82254b..6595fea 100644 --- a/ui/src/features/workspaces/CreateWorkspaceDialog.tsx +++ b/ui/src/features/workspaces/CreateWorkspaceDialog.tsx @@ -1,11 +1,6 @@ import { Button, Select } from "@redis-ui/components"; -import { - useEffect, - useMemo, - useRef, - useState, - type FormEvent, -} from "react"; +import { useEffect, useMemo, useRef, useState } from "react"; +import type { FormEvent } from "react"; import { useNavigate } from "@tanstack/react-router"; import styled from "styled-components"; import { @@ -23,20 +18,15 @@ import { } from "../../components/afs-kit"; import { SurfaceCard } from "../../components/card-shell"; import { getControlPlaneURL } from "../../foundation/api/afs"; -import { - type AFSDatabaseScopeRecord, - useDatabaseScope, -} from "../../foundation/database-scope"; +import { useDatabaseScope } from "../../foundation/database-scope"; +import type { AFSDatabaseScopeRecord } from "../../foundation/database-scope"; import { useCreateMCPAccessTokenMutation, useCreateWorkspaceMutation, useImportLocalMutation, } from "../../foundation/hooks/use-afs"; -import { - 
findTemplate, - type Template, - type TemplateSeedFile, -} from "../templates/templates-data"; +import { findTemplate } from "../templates/templates-data"; +import type { Template, TemplateSeedFile } from "../templates/templates-data"; type SeedMode = "blank" | "import"; type View = "chooser" | "template-form"; @@ -52,9 +42,12 @@ function eligibleDatabases(databases: AFSDatabaseScopeRecord[]) { return databases.filter((database) => database.canCreateWorkspaces); } -function preferredDatabase(databases: AFSDatabaseScopeRecord[]) { +function preferredDatabase( + databases: AFSDatabaseScopeRecord[], +): AFSDatabaseScopeRecord | null { const list = eligibleDatabases(databases); - return list.find((database) => database.isDefault) ?? list[0] ?? null; + if (list.length === 0) return null; + return list.find((database) => database.isDefault) ?? list[0]; } function isFreeTierLimitError(error: unknown): boolean { @@ -165,10 +158,8 @@ export function CreateWorkspaceDialog({ const startTemplate = initialTemplateId ? findTemplate(initialTemplateId) ?? null : null; - const startedFromTemplate = - startTemplate != null && startTemplate.id !== "blank"; - if (startedFromTemplate && startTemplate) { + if (startTemplate != null && startTemplate.id !== "blank") { setView("template-form"); setSelectedTemplateId(startTemplate.id); setName(startTemplate.slug); @@ -209,7 +200,7 @@ export function CreateWorkspaceDialog({ function handleFolderPicked(files: FileList | null) { if (!files || files.length === 0) return; - const path = files[0].webkitRelativePath?.split("/")[0] ?? ""; + const path = files[0].webkitRelativePath.split("/")[0]; if (!path) return; setImportPath(path); setImportFileCount(files.length); @@ -458,7 +449,7 @@ export function CreateWorkspaceDialog({ label: `${database.displayName || database.databaseName}${database.isDefault ? 
" (default)" : ""}`, }))} value={databaseId} - onChange={(next) => setDatabaseId(next as string)} + onChange={(next) => setDatabaseId(next)} /> ) : null} @@ -589,7 +580,7 @@ export function CreateWorkspaceDialog({ label: `${database.displayName || database.databaseName}${database.isDefault ? " (default)" : ""}`, }))} value={databaseId} - onChange={(next) => setDatabaseId(next as string)} + onChange={(next) => setDatabaseId(next)} /> ) : null} diff --git a/ui/src/foundation/api/afs.ts b/ui/src/foundation/api/afs.ts index 1611728..78cba71 100644 --- a/ui/src/foundation/api/afs.ts +++ b/ui/src/foundation/api/afs.ts @@ -893,7 +893,7 @@ function normalizeWorkspace(workspace: AFSWorkspace): AFSWorkspace { } function demoWorkspaceContentStorage(workspace: AFSWorkspace) { - const fileCount = workspace.fileCount ?? workspace.files.length; + const fileCount = workspace.fileCount; if (fileCount <= 0) { return { profile: "none" as const, diff --git a/ui/src/foundation/auth-context.tsx b/ui/src/foundation/auth-context.tsx index 4278758..2fd046e 100644 --- a/ui/src/foundation/auth-context.tsx +++ b/ui/src/foundation/auth-context.tsx @@ -1,5 +1,5 @@ import { ClerkProvider, useAuth, useUser } from "@clerk/react"; -import { useQuery } from "@tanstack/react-query"; +import { queryOptions, useQuery } from "@tanstack/react-query"; import { createContext, useContext, useMemo } from "react"; import type { PropsWithChildren } from "react"; import { afsApi } from "./api/afs"; @@ -35,7 +35,7 @@ function resolveDisplayName(config: AFSAuthConfig) { if (config.user?.email?.trim()) { return config.user.email.trim(); } - if (config.user?.subject?.trim()) { + if (config.user?.subject.trim()) { return config.user.subject.trim(); } if (config.mode === "clerk") { @@ -124,14 +124,16 @@ function decodeClerkFrontendHost(publishableKey?: string): string | null { } } +const authConfigQuery = queryOptions({ + queryKey: ["afs", "auth", "config"], + queryFn: () => afsApi.getAuthConfig(), + staleTime: 
15_000, + gcTime: 10 * 60 * 1000, + retry: 1, +}); + export function AuthProvider(props: PropsWithChildren) { - const authQuery = useQuery({ - queryKey: ["afs", "auth", "config"], - queryFn: () => afsApi.getAuthConfig(), - staleTime: 15_000, - gcTime: 10 * 60 * 1000, - retry: 1, - }); + const authQuery = useQuery(authConfigQuery); const config = authQuery.data ?? defaultAuthConfig; const baseValue = useMemo(() => ({ diff --git a/ui/src/foundation/background-pattern.tsx b/ui/src/foundation/background-pattern.tsx deleted file mode 100644 index cfb4467..0000000 --- a/ui/src/foundation/background-pattern.tsx +++ /dev/null @@ -1,5 +0,0 @@ -import type { ReactNode } from "react"; - -export function BackgroundPatternProvider({ children }: { children: ReactNode }) { - return <>{children}; -} diff --git a/ui/src/foundation/clipboard-icons.tsx b/ui/src/foundation/clipboard-icons.tsx new file mode 100644 index 0000000..6077ecf --- /dev/null +++ b/ui/src/foundation/clipboard-icons.tsx @@ -0,0 +1,40 @@ +// Inline 11px SVG icons used by table copy-to-clipboard flash buttons. +// Standalone so we never reach for a heavyweight icon import for the smallest +// indicator state in the app. + +export function CopyIcon() { + return ( + + ); +} + +export function CheckIcon() { + return ( + + ); +} diff --git a/ui/src/foundation/database-scope.tsx b/ui/src/foundation/database-scope.tsx index 59c5239..2b5f897 100644 --- a/ui/src/foundation/database-scope.tsx +++ b/ui/src/foundation/database-scope.tsx @@ -142,9 +142,7 @@ export function DatabaseScopeProvider(props: PropsWithChildren) { ? null : databasesQuery.error instanceof Error ? databasesQuery.error.message - : databasesQuery.error != null - ? "Unable to load databases." 
- : null; + : null; const unavailableDatabases = useMemo( () => databases.filter((database) => !database.isHealthy), [databases], diff --git a/ui/src/foundation/sort-compare.ts b/ui/src/foundation/sort-compare.ts new file mode 100644 index 0000000..18b7e17 --- /dev/null +++ b/ui/src/foundation/sort-compare.ts @@ -0,0 +1,13 @@ +// Generic sort comparator used by table components. +// Numbers compare arithmetically; everything else compares as locale strings. +export function compareValues( + left: string | number, + right: string | number, + direction: "asc" | "desc", +) { + const result = + typeof left === "number" && typeof right === "number" + ? left - right + : String(left).localeCompare(String(right)); + return direction === "asc" ? result : result * -1; +} diff --git a/ui/src/foundation/tables/access-tokens-table.tsx b/ui/src/foundation/tables/access-tokens-table.tsx index 44af8f5..74b4c7e 100644 --- a/ui/src/foundation/tables/access-tokens-table.tsx +++ b/ui/src/foundation/tables/access-tokens-table.tsx @@ -1,7 +1,8 @@ import { Button, Typography } from "@redis-ui/components"; import { Table } from "@redis-ui/table"; import type { ColumnDef, SortingState } from "@redis-ui/table"; -import { useMemo, useState, type ReactNode } from "react"; +import { useMemo, useState } from "react"; +import type { ReactNode } from "react"; import { useNavigate } from "@tanstack/react-router"; import styled from "styled-components"; import { @@ -14,8 +15,10 @@ import { } from "../../components/afs-kit"; import { PlugIcon } from "../../components/lucide-icons"; import { getControlPlaneURL } from "../api/afs"; -import { isControlPlaneScope, type AFSMCPProfile, type AFSMCPToken } from "../types/afs"; +import { isControlPlaneScope } from "../types/afs"; +import type { AFSMCPProfile, AFSMCPToken } from "../types/afs"; import { findTemplate } from "../../features/templates/templates-data"; +import { compareValues } from "../sort-compare"; import * as S from 
"./workspace-table.styles"; type AccessTokenSortField = "name" | "workspaceName" | "lastUsedAt" | "expiresAt" | "createdAt"; @@ -32,18 +35,6 @@ type Props = { revoking?: boolean; }; -function compareValues( - left: string | number, - right: string | number, - direction: "asc" | "desc", -) { - const result = - typeof left === "number" && typeof right === "number" - ? left - right - : String(left).localeCompare(String(right)); - return direction === "asc" ? result : result * -1; -} - function formatProfile(profile: AFSMCPProfile) { switch (profile) { case "workspace-ro": diff --git a/ui/src/foundation/tables/activity-table.tsx b/ui/src/foundation/tables/activity-table.tsx index 0014acf..4f50d23 100644 --- a/ui/src/foundation/tables/activity-table.tsx +++ b/ui/src/foundation/tables/activity-table.tsx @@ -3,6 +3,7 @@ import { Table } from "@redis-ui/table"; import type { ColumnDef, SortingState } from "@redis-ui/table"; import { useMemo, useState } from "react"; import type { AFSActivityEvent } from "../types/afs"; +import { compareValues } from "../sort-compare"; import * as S from "./workspace-table.styles"; type ActivitySortField = "createdAt" | "workspaceName" | "title" | "scope" | "actor"; @@ -16,19 +17,6 @@ type Props = { onOpenActivity: (event: AFSActivityEvent) => void; }; -function compareValues( - left: string | number, - right: string | number, - direction: "asc" | "desc", -) { - const result = - typeof left === "number" && typeof right === "number" - ? left - right - : String(left).localeCompare(String(right)); - - return direction === "asc" ? 
result : result * -1; -} - export function ActivityTable({ rows, loading = false, diff --git a/ui/src/foundation/tables/agents-table.tsx b/ui/src/foundation/tables/agents-table.tsx index 6b6e4b7..cbaf5b1 100644 --- a/ui/src/foundation/tables/agents-table.tsx +++ b/ui/src/foundation/tables/agents-table.tsx @@ -623,7 +623,7 @@ export function AgentsTable({ {/* ---- MAP (TOPOLOGY) VIEW ---- */} {!loading && !error && filteredRows.length > 0 && viewMode === "map" && mapAvailable ? ( - + ) : null} {/* ---- TABLE VIEW ---- */} diff --git a/ui/src/foundation/tables/changes-table.tsx b/ui/src/foundation/tables/changes-table.tsx index 34525e6..42a3012 100644 --- a/ui/src/foundation/tables/changes-table.tsx +++ b/ui/src/foundation/tables/changes-table.tsx @@ -3,6 +3,7 @@ import { Table } from "@redis-ui/table"; import type { ColumnDef, SortingState } from "@redis-ui/table"; import { useMemo, useState } from "react"; import styled from "styled-components"; +import { compareValues } from "../sort-compare"; import { shortDateTime } from "../time-format"; import type { AFSChangelogEntry } from "../types/afs"; import { truncateMiddlePath } from "./changes-table-utils"; @@ -33,18 +34,6 @@ type Props = { onOpenChange?: (entry: HistoryTableRow) => void; }; -function compareValues( - left: string | number, - right: string | number, - direction: "asc" | "desc", -) { - const result = - typeof left === "number" && typeof right === "number" - ? left - right - : String(left).localeCompare(String(right)); - return direction === "asc" ? result : result * -1; -} - function formatSignedBytes(n?: number): string { if (n === undefined || n === 0) return "—"; const sign = n > 0 ? 
"+" : "−"; diff --git a/ui/src/foundation/tables/database-table.tsx b/ui/src/foundation/tables/database-table.tsx index 45b140c..5a603f7 100644 --- a/ui/src/foundation/tables/database-table.tsx +++ b/ui/src/foundation/tables/database-table.tsx @@ -17,6 +17,7 @@ import { import { SurfaceCard } from "../../components/card-shell"; import type { AFSDatabaseScopeRecord } from "../database-scope"; import { formatBytes } from "../api/afs"; +import { CheckIcon, CopyIcon } from "../clipboard-icons"; import { redisInsightUrl } from "../redis-insight"; import * as S from "./workspace-table.styles"; import { DenseTableViewport } from "./workspace-table.styles"; @@ -825,43 +826,6 @@ const CopyButton = styled.button` } `; -function CopyIcon() { - return ( - - ); -} - -function CheckIcon() { - return ( - - ); -} - /* ---- Usage cell ---- */ const UsageStack = styled.div` diff --git a/ui/src/foundation/tables/events-table.tsx b/ui/src/foundation/tables/events-table.tsx index 2a3c68c..de5ed9c 100644 --- a/ui/src/foundation/tables/events-table.tsx +++ b/ui/src/foundation/tables/events-table.tsx @@ -3,6 +3,7 @@ import { Table } from "@redis-ui/table"; import type { ColumnDef, SortingState } from "@redis-ui/table"; import { useMemo, useState } from "react"; import type { AFSEventEntry } from "../types/afs"; +import { compareValues } from "../sort-compare"; import * as S from "./workspace-table.styles"; type EventSortField = "createdAt" | "workspaceName" | "kind" | "actor" | "path"; @@ -17,19 +18,6 @@ type Props = { onOpenEvent: (event: AFSEventEntry) => void; }; -function compareValues( - left: string | number, - right: string | number, - direction: "asc" | "desc", -) { - const result = - typeof left === "number" && typeof right === "number" - ? left - right - : String(left).localeCompare(String(right)); - - return direction === "asc" ? 
result : result * -1; -} - export function EventsTable({ rows, loading = false, diff --git a/ui/src/foundation/tables/workspace-table.tsx b/ui/src/foundation/tables/workspace-table.tsx index c390b0b..cde8f46 100644 --- a/ui/src/foundation/tables/workspace-table.tsx +++ b/ui/src/foundation/tables/workspace-table.tsx @@ -10,6 +10,8 @@ import { useNavigate } from "@tanstack/react-router"; import styled, { css, keyframes } from "styled-components"; import { formatBytes } from "../api/afs"; import { useStoredViewMode } from "../hooks/use-stored-view-mode"; +import { CheckIcon, CopyIcon } from "../clipboard-icons"; +import { compareValues } from "../sort-compare"; import { shortDateTime } from "../time-format"; import type { AFSWorkspaceSummary } from "../types/afs"; import { displayWorkspaceName } from "../workspace-display"; @@ -69,19 +71,6 @@ function workspaceRowKey(workspace: AFSWorkspaceSummary) { return `${workspace.databaseId}:${workspace.id}`; } -function compareValues( - left: string | number, - right: string | number, - direction: "asc" | "desc", -) { - const result = - typeof left === "number" && typeof right === "number" - ? left - right - : String(left).localeCompare(String(right)); - - return direction === "asc" ? 
result : result * -1; -} - export function WorkspaceTable({ rows, loading = false, @@ -98,7 +87,10 @@ export function WorkspaceTable({ const [search, setSearch] = useState(""); const [sortBy, setSortBy] = useState("updatedAt"); const [sortDirection, setSortDirection] = useState<"asc" | "desc">("desc"); - const [viewMode, setViewMode] = useStoredViewMode("afs.workspaces.viewMode", "table"); + const [viewMode, setViewMode] = useStoredViewMode<"table" | "cards">( + "afs.workspaces.viewMode", + "table", + ); const [copiedId, setCopiedId] = useState(null); const navigate = useNavigate(); useMinuteTick(); @@ -282,7 +274,7 @@ export function WorkspaceTable({ disabled={isDeleting} onClick={(event) => { event.stopPropagation(); - onDeleteWorkspace?.(row.original); + onDeleteWorkspace(row.original); }} >