From be750bc28c4061a4f76eff4de128d923d552cadd Mon Sep 17 00:00:00 2001 From: James Ross Date: Wed, 25 Mar 2026 08:02:33 -0700 Subject: [PATCH 01/18] feat: add relay agent CLI foundation --- CONTRIBUTING.md | 323 ++++++++- ROADMAP.md | 441 ++++++++----- STATUS.md | 115 ++-- bin/actions.js | 26 +- bin/agent/cli.js | 493 ++++++++++++++ bin/agent/protocol.js | 164 +++++ bin/git-cas.js | 836 +++++++++++++----------- docs/design/0001-m18-relay-agent-cli.md | 324 +++++++++ docs/design/README.md | 14 + test/integration/agent-cli.test.js | 251 +++++++ test/unit/cli/agent-protocol.test.js | 107 +++ 11 files changed, 2456 insertions(+), 638 deletions(-) create mode 100644 bin/agent/cli.js create mode 100644 bin/agent/protocol.js create mode 100644 docs/design/0001-m18-relay-agent-cli.md create mode 100644 docs/design/README.md create mode 100644 test/integration/agent-cli.test.js create mode 100644 test/unit/cli/agent-protocol.test.js diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e45ed12..b69bdc3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,27 +1,296 @@ -# Contributing to @git-stunts/cas - -## Philosophy -- **Domain Purity**: Keep crypto and chunking logic independent of Git implementation details. -- **Portability**: The `GitPersistencePort` allows swapping the storage backend. - -## Development Workflow - -1. **Install Dependencies**: Use `pnpm install` to ensure consistent dependency management. -2. **Install Git Hooks**: Run `bash scripts/install-hooks.sh` to set up local quality gates. This will ensure that linting and unit tests pass before every push. -3. **Run Tests Locally**: - - `pnpm test` for unit tests. - - `pnpm run test:integration:node` for Node integration tests (requires Docker). - - `pnpm run test:integration:bun` for Bun integration tests. - - `pnpm run test:integration:deno` for Deno integration tests. -4. **Prepare Releases**: - - `pnpm release:verify` for the full release checklist and release-note summary output. 
- - Follow [docs/RELEASE.md](./docs/RELEASE.md) for the canonical patch-release flow. - -## Quality Gates -We enforce high standards for code quality: -- **Linting**: Must pass `pnpm run lint`. -- **Unit Tests**: All unit tests must pass. -- **Integration Tests**: Must pass across Node, Bun, and Deno runtimes. -- **Release Prep**: `pnpm release:verify` must pass before a tag is created. - -These gates are enforced both locally via git hooks and in CI/CD. +# Contributing to @git-stunts/git-cas + +`git-cas` is not just a bag of Git tricks. + +It is a deterministic artifact system built on Git's object database, with two +product surfaces over one shared core: + +- a human CLI/TUI +- a machine-facing agent surface + +If you contribute here, the job is not just to make code pass. The job is to +protect that product shape while making the system more capable. + +## Core Product Philosophy + +- Git is the substrate, not the product. +- Integrity is sacred. +- Restore must be deterministic. +- Provenance matters. +- Verification matters. +- GC-safe storage is non-negotiable. +- Human and agent surfaces are separate products over one domain core. +- The substrate may be sophisticated; the default UX must still feel boring, + trustworthy, and legible. + +The highest-level rule is simple: + +If a change makes storage less trustworthy, restore less deterministic, +automation less explicit, or the normal operator flow more demanding, it is +probably the wrong change. 
+ +## Development Philosophy + +This project prefers: + +- DX over ceremony +- behavior over architecture theater +- explicit boundaries over clever coupling +- local-first, self-contained operation over service dependency +- boring human defaults over impressive internals +- machine contracts over scraped text + +In practice, that means: + +- keep commands small and obvious +- keep the default human UX boring and legible +- keep Git internals out of normal UX unless they are operationally necessary +- keep future automation concerns out of the human path until they are earned +- keep every human CLI command machine-readable through `--json` +- keep the future `git cas agent` surface JSONL-first and non-interactive + +## Architectural Principles + +### Hexagonal Architecture + +The product should keep clear boundaries between: + +- domain behavior +- application/use-case orchestration +- ingress adapters such as the human CLI/TUI and the agent CLI +- infrastructure such as Git persistence, refs, codecs, crypto, and filesystem + +Do not let UI concerns leak into persistence. +Do not let storage details leak into normal UX. +Do not let terminal behavior define the application boundary. + +### SOLID, Pragmatically Applied + +Use SOLID as boundary discipline, not as a pretext for abstraction sprawl. + +Good: + +- narrow modules +- explicit seams +- dependency inversion around important adapters +- shared application behavior consumed by multiple surfaces + +Bad: + +- abstraction for its own sake +- indirection before there is pressure for it +- architecture rituals that slow delivery without protecting behavior + +## Product Management Philosophy + +This project uses IBM Design Thinking style framing for milestone design: + +- sponsor user +- sponsor agent +- hills +- playback questions +- explicit non-goals + +Milestones should be grounded in user or agent value, not backend vanity. + +Before promoting a new direction, ask: + +- which hill does this support? 
+- what human or agent behavior does this improve? +- what trust does this increase? +- does this make the system more deterministic, more legible, or more + automatable? + +If the answer is unclear, the work probably belongs in the backlog, not the +roadmap. + +## Build Order + +The expected order of work is: + +1. Write or revise design docs first. +2. Encode behavior as executable tests second. +3. Implement third. + +Tests are the spec. + +Do not insert a second prose-spec layer between design and tests. +Do not treat implementation details as the primary unit of correctness. + +## Milestone Development Loop + +Each milestone should follow the same explicit loop: + +1. design docs first +2. tests as spec second +3. implementation third +4. retrospective after delivery +5. rewrite the root README to reflect reality +6. close the milestone in roadmap/status docs + +This loop is part of the process, not optional cleanup. + +The point is to keep the repo honest about: + +- what is planned +- what is specified +- what is actually implemented +- what was learned + +## Release Discipline + +Milestone closure and release discipline are coupled. + +Rules: + +- keep the root [CHANGELOG.md](./CHANGELOG.md) +- keep `package.json` and `jsr.json` versioned to reality, not aspiration +- when a milestone is closed, bump the in-flight version on the release commit +- create a Git tag on the commit that lands on `main` for that release +- follow [docs/RELEASE.md](./docs/RELEASE.md) instead of improvising release flow + +The version and tag should reflect milestone reality, not hopeful scope. + +## Testing Rules + +Tests must be deterministic. + +That means: + +- no real network dependency +- no ambient home-directory state +- no ambient Git config assumptions +- no interactive shell expectations in the core suite +- no timing-based flakes +- no shared mutable repository state between tests + +Every test that touches storage should use isolated temp state. 
+ +Prefer: + +- throwaway local repos +- throwaway bare remotes when needed +- fixed env and fixed IDs where practical +- direct argv subprocess execution instead of shell-wrapped commands + +Tests should pin: + +- user-visible behavior +- integrity and restore correctness +- provenance and verification behavior +- immutability boundaries +- honest backup/storage semantics +- `--json` output contracts for the human CLI +- JSONL protocol contracts for the agent CLI as it lands + +Tests should not overfit: + +- class layout +- file-private helpers +- incidental implementation structure + +Local testing policy: + +- `npm test` is the default fast suite +- `pnpm run lint` must stay clean +- integration tests run through Docker-backed runtime targets +- `pnpm release:verify` is the release truth source +- install hooks with `bash scripts/install-hooks.sh` + +## Human Surface Guardrails + +Do not introduce any of the following into the normal operator path unless +explicitly re-approved: + +- hidden side effects +- smart guessing in place of explicit state +- TUI-only access to essential behavior +- substrate jargon when plain language will do +- prompts where flags or files should be accepted + +The human path should feel trustworthy and boring, not magical. + +## Agent Surface Guardrails + +The planned `git cas agent` surface is the automation contract. + +That implies: + +- no TTY branching +- no implicit prompts +- stdout carries only protocol data +- stderr carries structured warnings and errors +- side effects must be explicit +- failure modes must be actionable without scraping prose +- binary payloads do not share protocol stdout + +Do not let the agent surface become “human CLI plus `--json`.” + +## UX Language Rules + +Default human-facing language should prefer artifact and storage language over +Git internals. 
+ +Prefer: + +- `stored` +- `verified` +- `restored` +- `encrypted` +- `vault` +- `backup pending` or `not yet backed up`, when such language is accurate + +Avoid leading with: + +- raw object-database trivia +- refs, trees, blobs, and OIDs unless the operator actually needs them + +Every human CLI command must also support `--json`. + +In `--json` mode: + +- human-readable text should be suppressed +- stdout should carry only the structured result payload +- stderr should carry warnings and errors + +For the agent CLI, the automation contract is JSONL-first and should stay +separate from the human `--json` surface. + +## Git Workflow + +Prefer small, honest commits. + +Do not rewrite shared history casually. +Prefer additive commits over history surgery. +Prefer merges over rebases for shared collaboration unless there is a compelling, +explicitly discussed reason otherwise. + +The point is not aesthetic Git history. The point is trustworthy collaboration. + +## What To Read First + +Before making non-trivial changes, read: + +- [README.md](./README.md) +- [STATUS.md](./STATUS.md) +- [ROADMAP.md](./ROADMAP.md) +- [docs/design/README.md](./docs/design/README.md) +- [docs/design/0001-m18-relay-agent-cli.md](./docs/design/0001-m18-relay-agent-cli.md) +- [docs/API.md](./docs/API.md) +- [docs/RELEASE.md](./docs/RELEASE.md) +- [COMPLETED_TASKS.md](./COMPLETED_TASKS.md) +- [CODE-EVAL.md](./CODE-EVAL.md) + +## Decision Rule + +When in doubt: + +- choose more trustworthy behavior +- choose clearer boundaries +- choose lower ceremony +- choose fewer hidden behaviors +- choose deterministic outputs +- choose main as the playback truth +- choose behavior over architecture theater +- protect the human path from unnecessary sophistication +- protect the future agent path from ambiguity diff --git a/ROADMAP.md b/ROADMAP.md index 8c8a5d5..694f3fe 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,19 +1,53 @@ -# @git-stunts/cas — ROADMAP +# @git-stunts/git-cas — ROADMAP -This document 
tracks the real current state of `git-cas` and the sequenced work that remains. -Completed milestone detail lives in [COMPLETED_TASKS.md](./COMPLETED_TASKS.md). Superseded work -lives in [GRAVEYARD.md](./GRAVEYARD.md). +This document tracks the real current state of `git-cas` and the sequenced work +that remains. -## Current Reality +It now follows the workflow defined in [CONTRIBUTING.md](./CONTRIBUTING.md): -- **Current release:** `v5.3.2` (2026-03-15) -- **Current line:** M16 Capstone shipped in `v5.3.0`; `v5.3.1` fixed repeated-chunk tree emission for repetitive content; `v5.3.2` stabilized test/runtime tooling; `v5.3.3` is the remaining M17 Ledger closeout in flight. -- **Supported runtimes:** Node.js 22.x (primary), Bun, Deno -- **Current operator experience:** the human-facing CLI/TUI is shipped now; the machine-facing agent CLI is planned next. +- sponsor user +- sponsor agent +- hills +- playback questions +- explicit non-goals +- design docs first, tests second, implementation third + +`main` is the playback truth. If code lands out of order, the roadmap adjusts to +match reality instead of pretending the original sequence still happened. -## Interface Strategy +Completed milestone detail lives in [COMPLETED_TASKS.md](./COMPLETED_TASKS.md). +Superseded work lives in [GRAVEYARD.md](./GRAVEYARD.md). -`git-cas` now has an explicit two-surface direction: +## Current Reality + +- **Last tagged release:** `v5.3.2` (`2026-03-15`) +- **Current package version on `main`:** `v5.3.3` +- **Supported runtimes:** Node.js 22.x (primary), Bun, Deno +- **Human surface reality:** the human CLI/TUI is already substantial and now + includes early repo-explorer work that belongs closer to the later UX line + than to M17 closeout. +- **Agent surface reality:** there is still no first-class `git cas agent` + contract. The main product gap is machine-facing determinism, not human + surface richness. 
+- **M17 reality:** the M17 closeout work is materially present on `main` + (`CODEOWNERS`, release verification, test conventions, property coverage), + even though release bookkeeping and docs drifted. +- **Next deliberate focus:** the next few cycles are agent-first. The human + surface should now follow the application boundaries that fall out of the + machine surface, not the other way around. + +## Product Doctrine + +- Git is the substrate, not the product. +- Integrity is sacred. +- Restore must be deterministic. +- Provenance matters. +- Verification matters. +- Human CLI/TUI and agent CLI are separate surfaces over one shared domain core. +- The default human UX should stay boring and trustworthy. +- The default machine UX should stay deterministic and replayable. + +## Two-Surface Strategy ### Human CLI/TUI @@ -21,228 +55,275 @@ This is the current public operator surface. - Existing `git cas ...` commands remain the stable human workflow. - Bijou formatting, prompts, dashboards, and TTY-aware behavior stay here. -- `--json` remains supported as convenience structured output for humans and simple scripts. -- Human-facing improvements continue under the Bijou/TUI roadmap. +- The human `--json` flag remains convenience structured output for humans and + simple scripts. +- Future human-surface work should reuse shared app-layer behavior instead of + inventing parallel logic in the TUI. ### Agent CLI -This is planned work starting in **M18 Relay**. +This is now the priority surface. 
- Namespace: `git cas agent` -- Output: JSONL on `stdout` only, one record per line -- No Bijou formatting, no TTY-mode branching, no implicit prompts -- Stable event envelope: `protocol`, `command`, `type`, `seq`, `ts`, `data` -- Reserved record types: `start`, `progress`, `warning`, `needs-input`, `result`, `error`, `end` -- One-shot commands in v1: stream records during execution, then exit -- Non-interactive secret/input handling: - - missing required input -> emit `needs-input`, exit `2` - - fatal execution failure -> emit `error`, exit `1` - - integrity/verification failure -> exit `3` - - success -> exit `0` -- Request input supports normal flags plus `--request -` and `--request @file.json` - -The agent CLI is a first-class workflow, not an extension of the human `--json` mode. - -## Shipped Summary - -| Version | Milestone | Codename | Theme | Status | -|---------|-----------|----------|-------|--------| -| v3.1.0 | M13 | Bijou | TUI dashboard and animated progress | ✅ Shipped | -| v4.0.0 | M14 | Conduit | Streaming restore, observability, parallel chunk I/O | ✅ Shipped | -| v4.0.1 | M8 + M9 | Spit Shine + Cockpit | Review hardening, `verify`, `--json`, CLI polish | ✅ Shipped | -| v5.0.0 | M10 | Hydra | Content-defined chunking | ✅ Shipped | -| v5.1.0 | M11 | Locksmith | Envelope encryption and recipient management | ✅ Shipped | -| v5.2.0 | M12 | Carousel | Key rotation without re-encrypting data | ✅ Shipped | -| v5.3.0 | M16 | Capstone | Audit remediation and security hardening | ✅ Shipped | -| v5.3.1 | — | Maintenance | Repeated-chunk tree integrity fix | ✅ Shipped | -| v5.3.2 | — | Maintenance | Vitest workspace split, CLI version sync, and runtime/tooling stabilization | ✅ Shipped | +- Output: JSONL on `stdout`, one protocol record per line +- `stderr`: structured warnings and errors only +- No TTY branching, no implicit prompts, no Bijou rendering +- Stable record envelope: `protocol`, `command`, `type`, `seq`, `ts`, `data` +- Reserved record types: 
`start`, `progress`, `warning`, `needs-input`, + `result`, `error`, `end` +- Missing required input emits `needs-input` and exits with a distinct code +- Integrity and verification failures get their own exit-code semantics +- The agent CLI is a first-class workflow, not an extension of the human + `--json` path + +## Honest State of `main` + +### Human Surface + +What is already true on `main`: + +- chunked Git-backed storage, restore, verify, encryption, recipients, and + rotation are already shipped in the domain/library +- the vault workflow is real and GC-safe +- diagnostics and release verification already exist +- the TUI has already moved beyond a simple vault inspector into a richer + repository explorer with refs browsing, source inspection, treemap views, and + a stronger theme layer + +This means the human surface is no longer the thing waiting to become real. It +is already real and ahead of the planning docs. + +### Agent Surface + +What is still missing: + +- a first-class machine runner +- a JSONL protocol contract +- exact machine-facing exit-code semantics +- non-interactive input handling as a core design constraint +- parity for the operational command set without scraping human CLI output + +This is the current product bottleneck. 
+ +## Tagged Releases + +| Version | Milestone | Theme | Status | +| -------- | ------------- | ----------------------------------------------------------------------- | --------- | +| `v5.3.2` | Maintenance | Vitest workspace split, CLI version sync, runtime/tooling stabilization | ✅ Tagged | +| `v5.3.1` | Maintenance | Repeated-chunk tree integrity fix | ✅ Tagged | +| `v5.3.0` | M16 Capstone | Audit remediation and security hardening | ✅ Tagged | +| `v5.2.0` | M12 Carousel | Key rotation without re-encrypting data | ✅ Tagged | +| `v5.1.0` | M11 Locksmith | Envelope encryption and recipient management | ✅ Tagged | +| `v5.0.0` | M10 Hydra | Content-defined chunking | ✅ Tagged | +| `v4.0.1` | M8 + M9 | Review hardening, `verify`, `--json`, CLI polish | ✅ Tagged | +| `v4.0.0` | M14 Conduit | Streaming restore, observability, parallel chunk I/O | ✅ Tagged | +| `v3.1.0` | M13 Bijou | TUI dashboard and animated progress | ✅ Tagged | Older history remains in [CHANGELOG.md](./CHANGELOG.md). -## Planned Release Sequence - -| Version | Milestone | Codename | Theme | Status | -|---------|-----------|----------|-------|--------| -| v5.3.3 | M17 | Ledger | Planning and ops reset | 📝 Planned | -| v5.4.0 | M18 | Relay | LLM-native CLI foundation | 📝 Planned | -| v5.5.0 | M19 | Nouveau | Bijou v3 human UX refresh | 📝 Planned | -| v5.6.0 | M20 | Sentinel | Vault health and safety | 📝 Planned | -| v5.7.0 | M21 | Atelier | Vault ergonomics and publishing | 📝 Planned | -| v5.8.0 | M22 | Cartographer | Repo intelligence and change analysis | 📝 Planned | -| v5.9.0 | M23 | Courier | Artifact sets and transfer | 📝 Planned | -| v5.10.0 | M24 | Spectrum | Storage and observability extensibility | 📝 Planned | -| v5.11.0 | M25 | Bastion | Enterprise key management research | 📝 Planned | - -## Dependency Sequence - -```text -M16 Capstone + v5.3.1/v5.3.2 maintenance ✅ - | - M17 Ledger - | - M18 Relay - | - M19 Nouveau - | - M20 Sentinel - | - M21 Atelier - | - M22 Cartographer - | - M23 
Courier - | - M24 Spectrum - | - M25 Bastion -``` - -This sequence is intentionally linear. It forces the docs/ops reset first, then the machine -interface split, then the human TUI refresh, and only then the broader feature expansion. +## Untagged `main` Line + +The current `main` branch is ahead of the last tagged release. + +It currently includes: + +- the M17 closeout work that was previously tracked as pending +- package version `5.3.3` +- early human-surface repo-explorer work that landed ahead of the old planned + sequence + +The roadmap therefore treats the next planning cycle as a recentering cycle, +not as a continuation of stale milestone fiction. + +## Near-Term Priority Stack + +1. **M18 Relay foundation** + Build the first credible agent contract. +2. **Relay follow-through** + Stay agent-first until the machine surface can handle core workflows without + scraping or prompting. +3. **M19 Nouveau** + Resume major human-surface work only after the agent surface has forced + cleaner application boundaries. ## Open Milestones -### M17 — Ledger (`v5.3.3`) +### M18 — Relay (`v5.4.0` target) + +**Theme:** first-class agent CLI foundation. + +**Sponsor user** + +- A maintainer or release engineer who wants to automate `git-cas` operations + without scraping terminal text. + +**Sponsor agent** + +- A coding agent, CI job, release bot, or backup workflow that needs exact, + replayable outcomes and explicit side effects. + +**Hills** + +- A sponsor agent can inspect, verify, and query `git-cas` state through a + stable JSONL protocol without depending on TTY behavior or human-readable + formatting. +- A sponsor user can trust automation built on `git-cas` because failures, + warnings, and requested inputs are explicit and machine-actionable. + +**Playback questions** + +- Can an agent complete `inspect`, `verify`, `vault list`, `vault info`, + `vault history`, `doctor`, and `vault stats` without scraping prose? 
+- Are protocol records ordered, typed, and stable across Node, Bun, and Deno? +- Does `stdout` remain pure protocol output after the first record? +- Are missing inputs and integrity failures distinguished cleanly by both record + type and exit code? + +**Explicit non-goals** -**Theme:** planning and operational reset after Capstone. +- No long-lived session protocol. +- No TUI redesign. +- No attempt to turn the human `--json` path into the automation contract. +- No binary restore payload over protocol `stdout`. -Deliverables: +**Work order** -- Close M16 in docs and reconcile [ROADMAP.md](./ROADMAP.md), [STATUS.md](./STATUS.md), and the shipped version history. -- Add `CODEOWNERS` or equivalent review-assignment automation. -- Document Git tree filename ordering semantics in test conventions to prevent future false positives. -- Define a release-prep workflow for `CHANGELOG` updates and version bump timing. -- Automate test-count injection into release notes or changelog prep. -- Add property-based fuzz coverage for envelope-encryption round-trips. +1. Write the agent protocol design doc. +2. Write contract tests for record order, shapes, `stdout` purity, `stderr` + behavior, and exit codes. +3. Implement a dedicated machine runner. +4. Ship read-heavy parity first: + `agent inspect`, `agent verify`, `agent vault list`, `agent vault info`, + `agent vault history`, `agent doctor`, `agent vault stats`. -### M18 — Relay (`v5.4.0`) +**Acceptance** -**Theme:** first-class LLM-native CLI. +- The protocol contract is documented in-repo. +- The read-heavy agent commands are JSONL-first and non-interactive. +- Contract tests pass on Node, Bun, and Deno. +- The human CLI continues to work unchanged outside explicitly shared internals. -Deliverables: +### Relay Follow-through (`v5.5.0` target) -- Introduce `git cas agent` as a separate machine-facing namespace. -- Add a dedicated machine command runner instead of extending the current human `runAction()` path. 
-- Define and implement the JSONL envelope contract: - `protocol`, `command`, `type`, `seq`, `ts`, `data`. -- Implement reserved record types: - `start`, `progress`, `warning`, `needs-input`, `result`, `error`, `end`. -- Enforce non-interactive behavior for secrets and missing inputs. -- Support flags plus `--request -` / `--request @file.json`. -- Deliver parity for: - `agent store`, `agent tree`, `agent inspect`, `agent restore`, `agent verify`, - `agent vault list`, `agent vault info`, `agent vault history`. -- Publish contract docs with exact exit-code behavior. +**Theme:** bring the agent surface to operational parity before more large +human-surface pushes. -Acceptance: +**Sponsor user** -- JSONL contract tests must verify record order, record shapes, `stdout` purity, `stderr` silence after protocol start, and exit codes on Node, Bun, and Deno. +- A maintainer who wants to wire `git-cas` into repeatable backup, restore, + publish, or release flows. -### M19 — Nouveau (`v5.5.0`) +**Sponsor agent** -**Theme:** Bijou v3 refresh for the human-facing experience. +- An autonomous system that must perform state-changing workflows end-to-end + with explicit inputs and replayable outcomes. -Deliverables: +**Hills** -- Upgrade `@flyingrobots/bijou`, `@flyingrobots/bijou-node`, and `@flyingrobots/bijou-tui` to `3.0.0`. -- Add `@flyingrobots/bijou-tui-app` for the refreshed shell. -- Move inspector/dashboard rendering onto the v3 `ViewOutput` contract. -- Split the current inspector into sub-apps for list, detail, history, and health panes. -- Add BCSS-driven responsive styling and layout presets. -- Add motion for focus shifts, pane changes, and shell transitions where it improves legibility. -- Add session restore for the human TUI layout. -- Replace the current low-fidelity heatmap/detail composition with a higher-fidelity surface-native view. 
+- A sponsor agent can complete the core `git-cas` operational loop + non-interactively: store, restore, rotate, recipient management, and vault + administration. +- A sponsor user can build automation on top of `git-cas` without needing a + human escape hatch for normal success paths. -Acceptance: +**Playback questions** -- Existing human CLI behavior stays stable outside the refreshed TUI. -- PTY smoke coverage must exercise inspect/dashboard navigation, filtering, resize, pane composition, and non-TTY fallback. +- Can an agent complete encrypted store and restore flows without prompting? +- Are passphrase files, request payloads, and missing-input branches explicit? +- Are state-changing side effects obvious in protocol output? +- Can agents reason about failures without parsing human error text? -### M20 — Sentinel (`v5.6.0`) +**Explicit non-goals** -**Theme:** vault health, crypto hygiene, and safety workflows. +- No long-lived interactive agent session. +- No human-surface expansion that bypasses the shared command/model layer. +- No hidden convenience prompting in the machine path. -Deliverables: +**Work order** -- `git cas vault status` -- `git cas gc` -- `encryptionCount` auto-rotation policy -- `.casrc` KDF parameter tuning with safe validation -- Human CLI warnings for nonce budget and KDF health -- Agent CLI warnings/results for the same health signals +1. Extend the design doc to cover write flows and input request semantics. +2. Extend contract tests to state-changing commands and failure branches. +3. Implement: + `agent store`, `agent tree`, `agent restore`, `agent rotate`, + `agent recipient ...`, and the vault write flows that belong in the machine + surface. +4. Add structured warnings for safety and policy signals that agents can act on. -### M21 — Atelier (`v5.7.0`) +**Acceptance** -**Theme:** vault ergonomics and publishing workflows. +- Core state-changing workflows are machine-accessible without prompting. 
+- Input request behavior is explicit and documented. +- Cross-runtime contract tests cover both read and write paths. +- The machine surface is credible enough to become the app-layer reference for + later human-surface work. -Deliverables: +### M19 — Nouveau (after Relay is credible) -- Named vaults -- `git cas vault add` to adopt existing trees -- Vault export flows: - - whole vault export - - single-entry export - - bulk export -- Publish flows: - - publish to working tree - - publish to branch - - auto-publish hook support -- File-level `--passphrase` CLI for standalone encrypted store flows +**Theme:** human UX refresh on top of agent-native application boundaries. -### M22 — Cartographer (`v5.8.0`) +Some groundwork has already landed on `main`: -**Theme:** repo intelligence and artifact comparison. +- repo explorer shell +- refs browser +- source inspection +- treemap atlas and drilldown +- stronger theme and motion work -Deliverables: +That work should now be treated as input, not as permission to keep pushing the +human surface ahead of the machine surface. -- Duplicate-detection warnings during store -- `git cas scan` / dedup advisor -- Manifest diff engine -- Machine diff stream for the agent CLI -- Human compare view layered on the M19 shell +**Sponsor user** -### M23 — Courier (`v5.9.0`) +- An operator who wants to inspect, understand, and recover artifact state with + less uncertainty and less CLI memorization. -**Theme:** artifact sets and transport. +**Sponsor agent** -Deliverables: +- An agent that benefits when the human surface reuses the same shared + application operations instead of bespoke TUI behavior. 
-- Snapshot trees for directory-level store and restore -- Portable bundles for air-gap transfer -- Watch mode built on snapshot-root semantics rather than ad hoc per-file state +**Hill** -### M24 — Spectrum (`v5.10.0`) +- The human surface becomes easier to trust because it sits on top of cleaner, + explicit app-layer behavior that was first forced into shape by the agent CLI. -**Theme:** storage and observability extensibility. +**Explicit non-goals** -Deliverables: +- No bespoke TUI-only behavior that bypasses shared command/model boundaries. +- No large human-surface push before Relay and Relay follow-through are credible. -- `CompressionPort` -- Additional codecs: `zstd`, `brotli`, `lz4` -- Prometheus/OpenTelemetry adapter for `ObservabilityPort` +## Later Lines -### M25 — Bastion (`v5.11.0`) +The later roadmap remains directionally the same, but detailed scoping stays +light until the agent-first line is delivered. -**Theme:** enterprise key-management research with hard exit criteria. +| Line | Theme | +| ------------ | -------------------------------------------------- | +| Sentinel | Vault health, crypto hygiene, and safety workflows | +| Atelier | Vault ergonomics and publishing workflows | +| Cartographer | Repo intelligence and artifact comparison | +| Courier | Artifact sets and transport | +| Spectrum | Storage and observability extensibility | +| Bastion | Enterprise key-management research | -Deliverables: +## Milestone Delivery Rules -- ADR for external key-management support -- Threat model for HSM/Vault-backed key flows -- Proof-of-concept `KeyManagementPort` adapter -- Decision memo on whether enterprise key management should become a product milestone +Every milestone follows the repository workflow discipline: -## Delivery Standards +1. design docs first +2. tests as spec second +3. implementation third +4. retrospective after delivery +5. rewrite the root README to reflect reality +6. 
close the milestone in roadmap/status docs -Every planned milestone follows the repository release discipline: +Additional release discipline: -- Human CLI/TUI behavior remains backward compatible unless a release explicitly declares otherwise. -- The human `--json` flag remains convenience output, not the automation contract. -- The first machine interface release is JSONL-only and one-shot; no session protocol is planned before the contract proves useful. -- `agent restore` writes to the filesystem in v1; binary payloads do not share protocol `stdout`. -- Any user-visible feature added after M18 must include: - - at least one human CLI/TUI test, and - - at least one agent-protocol test when the feature is exposed to the machine surface. +- tagged releases reflect reality, not aspiration +- the human `--json` flag remains convenience output, not the automation + contract +- the machine surface stays JSONL-first and one-shot until a stronger protocol + is justified by playback ## Document Boundaries diff --git a/STATUS.md b/STATUS.md index a545122..3feb237 100644 --- a/STATUS.md +++ b/STATUS.md @@ -1,85 +1,92 @@ -# @git-stunts/cas — Project Status +# @git-stunts/git-cas — Project Status -**Current release:** `v5.3.2` -**Current branch version:** `v5.3.3` -**Last release:** `2026-03-15` -**Current line:** M16 Capstone shipped in `v5.3.0`; `v5.3.1` fixed repeated-chunk tree emission; `v5.3.2` stabilized test/runtime tooling; `v5.3.3` is the remaining M17 closeout in flight. +**Last tagged release:** `v5.3.2` (`2026-03-15`) +**Current package version on `main`:** `v5.3.3` +**Playback truth:** `main` **Runtimes:** Node.js 22.x, Bun, Deno +**Current strategic focus:** agent-first for the next few cycles --- -## Interface Strategy +## Honest State -- **Human CLI/TUI:** the current public operator surface. Existing `git cas ...` commands, Bijou formatting, prompts, dashboards, and `--json` convenience output stay here. -- **Agent CLI:** planned next as `git cas agent`. 
It will be JSONL-first, non-interactive by default, and independent from Bijou rendering or TTY-only behavior. +- The human CLI/TUI is already real and ahead of the old planning docs. +- M17 closeout work is materially on `main`, even though the release/docs + bookkeeping drifted. +- Early repo-explorer and TUI refresh work also landed on `main` ahead of the + old sequence. +- The biggest product gap is now the missing first-class agent CLI. --- -## Recently Shipped +## Two Surfaces -| Version | Milestone | Highlights | -|---------|-----------|------------| -| `v5.3.2` | Maintenance | Vitest workspace split for deterministic integration runs; CLI version sync; test/runtime tooling stabilization | -| `v5.3.1` | Maintenance | Repeated-chunk tree integrity fix; unique chunk tree entries; `git fsck` regression coverage | -| `v5.3.0` | M16 Capstone | Audit remediation, `.casrc`, passphrase-file support, restore guards, `encryptionCount`, lifecycle rename | -| `v5.2.0` | M12 Carousel | Key rotation without re-encrypting data | -| `v5.1.0` | M11 Locksmith | Envelope encryption and recipient management | -| `v5.0.0` | M10 Hydra | Content-defined chunking | -| `v4.0.1` | M8 + M9 | Review hardening, `verify`, `--json`, CLI polish | -| `v4.0.0` | M14 Conduit | Streaming restore, observability, parallel chunk I/O | -| `v3.1.0` | M13 Bijou | Interactive dashboard and animated progress | +- **Human CLI/TUI:** stable operator surface, boring by default, `--json` kept + as convenience structured output for humans and simple scripts. +- **Agent CLI:** next priority surface, JSONL-first, non-interactive, and + separate from the human `--json` path. -Milestone labels are thematic and non-sequential; the versions above are listed in release order. +--- + +## Current Hills + +### Human Hill + +A human operator can store, inspect, verify, restore, and manage artifacts with +confidence and without memorizing Git plumbing. 
+ +### Agent Hill + +A coding agent, CI job, or release bot can execute core `git-cas` workflows +through a stable machine contract without scraping prose or depending on TTY +behavior. --- ## Next Up -### M17 — Ledger (`v5.3.3`) +### M18 — Relay (`v5.4.0` target) + +**Sponsor user** + +- Maintainer or release engineer building automation around `git-cas` + +**Sponsor agent** + +- Coding agent, CI job, release bot, or backup workflow -Planning and ops reset: +**Hill** -- Reconcile `ROADMAP.md`, `STATUS.md`, and release messaging -- Add review automation (`CODEOWNERS` or equivalent) -- Document Git tree ordering test conventions -- Define release-prep workflow for changelog/version timing -- Automate test-count injection into release notes or changelog prep -- Add property-based fuzz coverage for envelope encryption +- Read-heavy `git-cas` operations become available through a first-class + JSONL-first machine protocol with explicit exit-code semantics. -### M18 — Relay (`v5.4.0`) +**Immediate work order** -LLM-native CLI foundation: +1. protocol design doc +2. contract tests +3. dedicated machine runner +4. read-heavy command parity -- Introduce `git cas agent` -- Define the JSONL protocol envelope and exit codes -- Add machine-facing parity for the current operational command set -- Enforce strict non-interactive input handling +### Relay Follow-through (`v5.5.0` target) -### M19 — Nouveau (`v5.5.0`) +- Stay agent-first until state-changing flows are also credible for automation. -Human UX refresh: +### M19 — Nouveau (after Relay is credible) -- Upgrade Bijou packages to `3.0.0` -- Move the inspector shell to the v3 `ViewOutput` model -- Split the dashboard into sub-apps -- Add better styling, motion, layout persistence, and richer heatmap/detail rendering +- Resume major human-surface work only after the agent surface has forced + cleaner app-layer boundaries. 
--- -## Sequenced Roadmap +## Sequence Snapshot -| Version | Milestone | Theme | -|---------|-----------|-------| -| `v5.3.3` | M17 Ledger | Planning and ops reset | -| `v5.4.0` | M18 Relay | LLM-native CLI foundation | -| `v5.5.0` | M19 Nouveau | Bijou v3 human UX refresh | -| `v5.6.0` | M20 Sentinel | Vault health and safety | -| `v5.7.0` | M21 Atelier | Vault ergonomics and publishing | -| `v5.8.0` | M22 Cartographer | Repo intelligence and change analysis | -| `v5.9.0` | M23 Courier | Artifact sets and transfer | -| `v5.10.0` | M24 Spectrum | Storage and observability extensibility | -| `v5.11.0` | M25 Bastion | Enterprise key-management research | +| Order | Focus | +| ----- | ----------------------------------------------------------- | +| Now | Relay foundation | +| Next | Relay follow-through | +| Then | Nouveau | +| Later | Sentinel, Atelier, Cartographer, Courier, Spectrum, Bastion | --- -*Future details: [ROADMAP.md](./ROADMAP.md) | Shipped detail: [COMPLETED_TASKS.md](./COMPLETED_TASKS.md) | Superseded: [GRAVEYARD.md](./GRAVEYARD.md)* +_Future detail: [ROADMAP.md](./ROADMAP.md) | Shipped detail: [COMPLETED_TASKS.md](./COMPLETED_TASKS.md) | Release history: [CHANGELOG.md](./CHANGELOG.md)_ diff --git a/bin/actions.js b/bin/actions.js index 35dea70..61167e5 100644 --- a/bin/actions.js +++ b/bin/actions.js @@ -6,6 +6,7 @@ /** @type {Readonly>} */ const HINTS = { + INVALID_INPUT: 'Check the agent command name and required input fields', MISSING_KEY: 'Provide --key-file or --vault-passphrase', MANIFEST_NOT_FOUND: 'Verify the tree OID contains a manifest', VAULT_ENTRY_NOT_FOUND: "Run 'git cas vault list' to see available entries", @@ -16,7 +17,8 @@ const HINTS = { RECIPIENT_NOT_FOUND: 'No recipient with that label exists in the manifest', RECIPIENT_ALREADY_EXISTS: 'A recipient with that label already exists', CANNOT_REMOVE_LAST_RECIPIENT: 'At least one recipient must remain in the manifest', - ROTATION_NOT_SUPPORTED: 'Key rotation requires envelope encryption 
— store with --recipient first', + ROTATION_NOT_SUPPORTED: + 'Key rotation requires envelope encryption — store with --recipient first', }; /** @@ -31,7 +33,9 @@ function writeError(err, json) { if (json) { /** @type {{ error: string, code?: string }} */ const obj = { error: message }; - if (code) { obj.code = code; } + if (code) { + obj.code = code; + } process.stderr.write(`${JSON.stringify(obj)}\n`); } else { const prefix = code ? `error [${code}]: ` : 'error: '; @@ -62,7 +66,9 @@ function getHint(code) { * @returns {Promise} */ function defaultDelay(ms) { - return new Promise((resolve) => { setTimeout(resolve, ms); }); + return new Promise((resolve) => { + setTimeout(resolve, ms); + }); } /** @@ -73,10 +79,16 @@ function defaultDelay(ms) { * @param {{ delay?: (ms: number) => Promise, setExitCode?: (code: number) => void }} [options] - Injectable dependencies. * @returns {(...args: any[]) => Promise} Wrapped action. */ -export function runAction(fn, getJson, { - delay = defaultDelay, - setExitCode = (code) => { process.exitCode = code; }, -} = {}) { +export function runAction( + fn, + getJson, + { + delay = defaultDelay, + setExitCode = (code) => { + process.exitCode = code; + }, + } = {} +) { return async (/** @type {any[]} */ ...args) => { try { await fn(...args); diff --git a/bin/agent/cli.js b/bin/agent/cli.js new file mode 100644 index 0000000..18a5fcc --- /dev/null +++ b/bin/agent/cli.js @@ -0,0 +1,493 @@ +import { readFileSync } from 'node:fs'; +import path from 'node:path'; +import { parseArgs } from 'node:util'; +import ContentAddressableStore from '../../index.js'; +import { createGitPlumbing } from '../../src/infrastructure/createGitPlumbing.js'; +import { buildVaultStats, inspectVaultHealth } from '../ui/vault-report.js'; +import { filterEntries } from '../ui/vault-list.js'; +import { AGENT_EXIT_CODES, createAgentSession, getAgentExitCode } from './protocol.js'; + +const AVAILABLE_COMMANDS = Object.freeze([ + 'inspect', + 'verify', + 'doctor', + 
'vault list', + 'vault info', + 'vault history', + 'vault stats', +]); + +const REQUEST_OPTION = { request: { type: 'string' } }; + +/** + * @param {string} cwd + * @returns {ContentAddressableStore} + */ +function createCas(cwd) { + const plumbing = createGitPlumbing({ cwd }); + return new ContentAddressableStore({ plumbing }); +} + +/** + * @param {string} message + * @param {Record} [meta] + * @returns {Error & { code: string, meta?: Record }} + */ +function invalidInput(message, meta) { + const err = /** @type {Error & { code: string, meta?: Record }} */ ( + new Error(message) + ); + err.code = 'INVALID_INPUT'; + if (meta) { + err.meta = meta; + } + return err; +} + +/** + * @param {string | undefined} request + * @param {NodeJS.ReadStream} stdin + * @returns {Promise>} + */ +async function readRequestPayload(request, stdin) { + if (!request) { + return {}; + } + + let raw; + if (request === '-') { + raw = await readStream(stdin); + } else if (request.startsWith('@')) { + raw = readFileSync(path.resolve(request.slice(1)), 'utf8'); + } else { + raw = request; + } + + if (!raw.trim()) { + throw invalidInput('Agent request payload must not be empty'); + } + + let parsed; + try { + parsed = JSON.parse(raw); + } catch (err) { + throw invalidInput( + `Invalid JSON request payload: ${err instanceof Error ? 
err.message : String(err)}` + ); + } + + if (!parsed || typeof parsed !== 'object' || Array.isArray(parsed)) { + throw invalidInput('Agent request payload must be a JSON object'); + } + + return parsed; +} + +/** + * @param {NodeJS.ReadStream} stream + * @returns {Promise} + */ +async function readStream(stream) { + if (typeof stream.setEncoding === 'function') { + stream.setEncoding('utf8'); + } + + let raw = ''; + for await (const chunk of stream) { + raw += String(chunk); + } + return raw; +} + +/** + * @param {string[]} args + * @param {Record} options + * @param {NodeJS.ReadStream} stdin + * @returns {Promise<{ values: Record, positionals: string[] }>} + */ +async function parseAgentInput(args, options, stdin) { + let parsed; + try { + parsed = parseArgs({ + args, + allowPositionals: true, + strict: true, + options: { + ...options, + ...REQUEST_OPTION, + }, + }); + } catch (err) { + throw invalidInput(err instanceof Error ? err.message : String(err)); + } + + const request = await readRequestPayload(parsed.values.request, stdin); + const values = { ...request, ...parsed.values }; + delete values.request; + + return { values, positionals: parsed.positionals }; +} + +/** + * @param {string[]} positionals + * @param {string[]} names + * @returns {Record} + */ +function assignPositionals(positionals, names) { + if (positionals.length > names.length) { + throw invalidInput( + `Unexpected positional arguments: ${positionals.slice(names.length).join(' ')}` + ); + } + + /** @type {Record} */ + const assigned = {}; + names.forEach((name, index) => { + if (positionals[index] !== undefined) { + assigned[name] = positionals[index]; + } + }); + return assigned; +} + +/** + * @param {Record} input + * @returns {{ cwd: string, slug?: string, oid?: string }} + */ +function resolveTarget(input) { + if (input.slug && input.oid) { + throw invalidInput('Provide --slug or --oid, not both'); + } + if (!input.slug && !input.oid) { + throw invalidInput('Provide --slug or --oid '); + 
} + return { + cwd: input.cwd || '.', + ...(input.slug ? { slug: input.slug } : {}), + ...(input.oid ? { oid: input.oid } : {}), + }; +} + +/** + * @param {unknown} value + * @returns {number | undefined} + */ +function parsePositiveInteger(value) { + if (value === undefined) { + return undefined; + } + + if (typeof value === 'number' && Number.isSafeInteger(value) && value > 0) { + return value; + } + + if (typeof value === 'string' && /^\d+$/.test(value)) { + const parsed = Number(value); + if (Number.isSafeInteger(parsed) && parsed > 0) { + return parsed; + } + } + + throw invalidInput('Expected a positive integer'); +} + +/** + * @param {{ cwd: string, slug?: string, oid?: string }} input + * @returns {Promise<{ cas: ContentAddressableStore, treeOid: string }>} + */ +async function resolveTree(input) { + const cas = createCas(input.cwd); + const treeOid = input.oid || (await cas.resolveVaultEntry({ slug: input.slug })); + return { cas, treeOid }; +} + +/** + * @param {string[]} argv + * @returns {{ command: string, args: string[] }} + */ +function resolveCommand(argv) { + if (argv.length === 0) { + return { command: 'agent', args: [] }; + } + + if (argv[0] === 'vault') { + if (!argv[1]) { + return { command: 'vault', args: [] }; + } + return { command: `vault.${argv[1]}`, args: argv.slice(2) }; + } + + return { command: argv[0], args: argv.slice(1) }; +} + +/** + * @param {string[]} argv + * @param {{ stdout?: Pick, stderr?: Pick, stdin?: NodeJS.ReadStream }} [deps] + * @returns {Promise} + */ +export async function runAgentCli( + argv, + { stdout = process.stdout, stderr = process.stderr, stdin = process.stdin } = {} +) { + const { command, args } = resolveCommand(argv); + const session = createAgentSession({ command, stdout, stderr }); + session.writeStart({ argv }); + + try { + const outcome = await executeAgentCommand(command, args, stdin); + const exitCode = outcome.exitCode ?? 
AGENT_EXIT_CODES.SUCCESS; + process.exitCode = exitCode; + session.writeResult(outcome.data); + session.writeEnd({ ok: exitCode === AGENT_EXIT_CODES.SUCCESS, exitCode }); + } catch (err) { + const exitCode = getAgentExitCode(err); + process.exitCode = exitCode; + session.writeError(err); + session.writeEnd({ ok: false, exitCode }); + } +} + +/** + * @param {string} command + * @param {string[]} args + * @param {NodeJS.ReadStream} stdin + * @returns {Promise<{ exitCode?: number, data: Record }>} + */ +async function executeAgentCommand(command, args, stdin) { + switch (command) { + case 'inspect': + return inspectCommand(args, stdin); + case 'verify': + return verifyCommand(args, stdin); + case 'doctor': + return doctorCommand(args, stdin); + case 'vault.list': + return vaultListCommand(args, stdin); + case 'vault.info': + return vaultInfoCommand(args, stdin); + case 'vault.history': + return vaultHistoryCommand(args, stdin); + case 'vault.stats': + return vaultStatsCommand(args, stdin); + default: + throw invalidInput('Unknown agent command', { + command, + availableCommands: AVAILABLE_COMMANDS, + }); + } +} + +/** + * @param {string[]} args + * @param {NodeJS.ReadStream} stdin + * @returns {Promise<{ data: Record }>} + */ +async function inspectCommand(args, stdin) { + const { values, positionals } = await parseAgentInput( + args, + { + slug: { type: 'string' }, + oid: { type: 'string' }, + cwd: { type: 'string' }, + }, + stdin + ); + const positionalInput = assignPositionals(positionals, []); + const input = resolveTarget({ ...values, ...positionalInput }); + const { cas, treeOid } = await resolveTree(input); + const manifest = await cas.readManifest({ treeOid }); + return { + data: { + treeOid, + manifest: manifest.toJSON(), + }, + }; +} + +/** + * @param {string[]} args + * @param {NodeJS.ReadStream} stdin + * @returns {Promise<{ exitCode: number, data: Record }>} + */ +async function verifyCommand(args, stdin) { + const { values, positionals } = await 
parseAgentInput(
+    args,
+    {
+      slug: { type: 'string' },
+      oid: { type: 'string' },
+      cwd: { type: 'string' },
+    },
+    stdin
+  );
+  const positionalInput = assignPositionals(positionals, []);
+  const input = resolveTarget({ ...values, ...positionalInput });
+  const { cas, treeOid } = await resolveTree(input);
+  const manifest = await cas.readManifest({ treeOid });
+  // Integrity semantics live in ContentAddressableStore.verifyIntegrity;
+  // this command only maps its boolean onto protocol exit codes.
+  const ok = await cas.verifyIntegrity(manifest);
+
+  return {
+    exitCode: ok ? AGENT_EXIT_CODES.SUCCESS : AGENT_EXIT_CODES.VERIFICATION_FAILED,
+    data: {
+      ok,
+      slug: manifest.slug,
+      treeOid,
+      chunks: manifest.chunks.length,
+    },
+  };
+}
+
+/**
+ * Run vault health checks and map the report status onto protocol exit codes.
+ * Accepts no positionals; `cwd` may come from flags or the --request payload.
+ *
+ * @param {string[]} args
+ * @param {NodeJS.ReadStream} stdin
+ * @returns {Promise<{ exitCode: number, data: Record<string, unknown> }>}
+ */
+async function doctorCommand(args, stdin) {
+  const { values, positionals } = await parseAgentInput(
+    args,
+    {
+      cwd: { type: 'string' },
+    },
+    stdin
+  );
+  // Called for its validation side effect: rejects any positional arguments.
+  assignPositionals(positionals, []);
+
+  const cas = createCas(values.cwd || '.');
+  const report = await inspectVaultHealth(cas);
+  // Any non-ok health report is treated as a verification failure,
+  // mirroring the `verify` command's exit-code semantics.
+  const exitCode =
+    report.status === 'ok' ? 
AGENT_EXIT_CODES.SUCCESS : AGENT_EXIT_CODES.VERIFICATION_FAILED; + + return { + exitCode, + data: { report }, + }; +} + +/** + * @param {string[]} args + * @param {NodeJS.ReadStream} stdin + * @returns {Promise<{ data: Record }>} + */ +async function vaultListCommand(args, stdin) { + const { values, positionals } = await parseAgentInput( + args, + { + cwd: { type: 'string' }, + filter: { type: 'string' }, + }, + stdin + ); + assignPositionals(positionals, []); + + const cas = createCas(values.cwd || '.'); + const all = await cas.listVault(); + const entries = filterEntries(all, values.filter); + + return { + data: { entries }, + }; +} + +/** + * @param {string[]} args + * @param {NodeJS.ReadStream} stdin + * @returns {Promise<{ data: Record }>} + */ +async function vaultInfoCommand(args, stdin) { + const { values, positionals } = await parseAgentInput( + args, + { + cwd: { type: 'string' }, + encryption: { type: 'boolean' }, + }, + stdin + ); + const input = { ...values, ...assignPositionals(positionals, ['slug']) }; + + if (!input.slug) { + throw invalidInput('Provide a vault slug'); + } + + const cas = createCas(input.cwd || '.'); + const treeOid = await cas.resolveVaultEntry({ slug: input.slug }); + /** @type {Record} */ + const result = { + slug: input.slug, + treeOid, + }; + + if (input.encryption) { + const metadata = await cas.getVaultMetadata(); + if (metadata?.encryption) { + result.encryption = metadata.encryption; + } + } + + return { data: result }; +} + +/** + * @param {string[]} args + * @param {NodeJS.ReadStream} stdin + * @returns {Promise<{ data: Record }>} + */ +async function vaultHistoryCommand(args, stdin) { + const { values, positionals } = await parseAgentInput( + args, + { + cwd: { type: 'string' }, + 'max-count': { type: 'string' }, + }, + stdin + ); + assignPositionals(positionals, []); + + const plumbing = createGitPlumbing({ cwd: values.cwd || '.' 
}); + const argsForGit = ['log', '--oneline', ContentAddressableStore.VAULT_REF]; + const maxCount = parsePositiveInteger(values['max-count']); + if (maxCount !== undefined) { + argsForGit.push(`-${maxCount}`); + } + + const output = await plumbing.execute({ args: argsForGit }); + const history = output + .split('\n') + .filter(Boolean) + .map((line) => { + const [commitOid, ...messageParts] = line.trim().split(/\s+/); + return { commitOid, message: messageParts.join(' ') }; + }); + + return { data: { history } }; +} + +/** + * @param {string[]} args + * @param {NodeJS.ReadStream} stdin + * @returns {Promise<{ data: Record }>} + */ +async function vaultStatsCommand(args, stdin) { + const { values, positionals } = await parseAgentInput( + args, + { + cwd: { type: 'string' }, + filter: { type: 'string' }, + }, + stdin + ); + assignPositionals(positionals, []); + + const cas = createCas(values.cwd || '.'); + const all = await cas.listVault(); + const entries = filterEntries(all, values.filter); + const records = []; + for (const entry of entries) { + const manifest = await cas.readManifest({ treeOid: entry.treeOid }); + records.push({ ...entry, manifest }); + } + + return { + data: { + stats: buildVaultStats(records), + }, + }; +} diff --git a/bin/agent/protocol.js b/bin/agent/protocol.js new file mode 100644 index 0000000..610b7da --- /dev/null +++ b/bin/agent/protocol.js @@ -0,0 +1,164 @@ +import { HINTS } from '../actions.js'; + +export const AGENT_PROTOCOL = 'git-cas-agent/v1'; + +export const AGENT_EXIT_CODES = Object.freeze({ + SUCCESS: 0, + FAILURE: 1, + INVALID_INPUT: 2, + VERIFICATION_FAILED: 3, +}); + +/** + * Map an operational error to an agent exit code. 
+ * + * @param {unknown} err + * @returns {number} + */ +export function getAgentExitCode(err) { + const code = getErrorCode(err); + + if (code === 'INVALID_INPUT' || code === 'NEEDS_INPUT') { + return AGENT_EXIT_CODES.INVALID_INPUT; + } + + if (code === 'INTEGRITY_ERROR') { + return AGENT_EXIT_CODES.VERIFICATION_FAILED; + } + + return AGENT_EXIT_CODES.FAILURE; +} + +/** + * Normalize an error into the JSONL protocol shape. + * + * @param {unknown} err + * @returns {{ code: string, message: string, retryable: boolean, hint?: string, meta?: Record }} + */ +export function normalizeAgentError(err) { + const code = getErrorCode(err) || 'ERROR'; + const message = getErrorMessage(err); + const retryable = getErrorRetryable(err); + + /** @type {{ code: string, message: string, retryable: boolean, hint?: string, meta?: Record }} */ + const data = { code, message, retryable }; + + if (Object.prototype.hasOwnProperty.call(HINTS, code)) { + data.hint = HINTS[code]; + } + + const meta = getErrorMeta(err); + if (meta) { + data.meta = meta; + } + + return data; +} + +/** + * @param {unknown} err + * @returns {string | undefined} + */ +function getErrorCode(err) { + if (typeof err === 'object' && err && typeof err.code === 'string') { + return err.code; + } + return undefined; +} + +/** + * @param {unknown} err + * @returns {string} + */ +function getErrorMessage(err) { + return err instanceof Error ? err.message : String(err); +} + +/** + * @param {unknown} err + * @returns {boolean} + */ +function getErrorRetryable(err) { + if (typeof err === 'object' && err && typeof err.retryable === 'boolean') { + return err.retryable; + } + return false; +} + +/** + * @param {unknown} err + * @returns {Record | undefined} + */ +function getErrorMeta(err) { + if ( + typeof err === 'object' && + err && + err.meta && + typeof err.meta === 'object' && + !Array.isArray(err.meta) + ) { + return err.meta; + } + return undefined; +} + +/** + * Create a JSONL session writer for the agent protocol. 
+ * + * @param {{ + * command: string, + * stdout?: Pick, + * stderr?: Pick, + * now?: () => Date, + * }} options + */ +export function createAgentSession({ + command, + stdout = process.stdout, + stderr = process.stderr, + now = () => new Date(), +}) { + let seq = 0; + + /** + * @param {Pick} stream + * @param {string} type + * @param {Record} data + */ + function write(stream, type, data) { + seq += 1; + const row = { + protocol: AGENT_PROTOCOL, + command, + type, + seq, + ts: now().toISOString(), + data, + }; + stream.write(`${JSON.stringify(row)}\n`); + } + + return { + writeStart(data = {}) { + write(stdout, 'start', data); + }, + writeProgress(data) { + write(stdout, 'progress', data); + }, + writeResult(data) { + write(stdout, 'result', data); + }, + writeEnd(data) { + write(stdout, 'end', data); + }, + writeWarning(data) { + write(stderr, 'warning', data); + }, + writeNeedsInput(data) { + write(stderr, 'needs-input', data); + }, + writeError(err) { + write(stderr, 'error', normalizeAgentError(err)); + }, + }; +} diff --git a/bin/git-cas.js b/bin/git-cas.js index 80b56fd..9c39079 100755 --- a/bin/git-cas.js +++ b/bin/git-cas.js @@ -12,8 +12,14 @@ import { renderEncryptionCard } from './ui/encryption-card.js'; import { renderHistoryTimeline } from './ui/history-timeline.js'; import { renderManifestView } from './ui/manifest-view.js'; import { renderHeatmap } from './ui/heatmap.js'; -import { buildVaultStats, inspectVaultHealth, renderDoctorReport, renderVaultStats } from './ui/vault-report.js'; +import { + buildVaultStats, + inspectVaultHealth, + renderDoctorReport, + renderVaultStats, +} from './ui/vault-report.js'; import { runAction } from './actions.js'; +import { runAgentCli } from './agent/cli.js'; import { flushStdioAndExit, installBrokenPipeHandlers } from './io.js'; import { filterEntries, formatTable, formatTabSeparated } from './ui/vault-list.js'; import { readPassphraseFile, promptPassphrase } from './ui/passphrase-prompt.js'; @@ -21,12 +27,17 @@ 
import { loadConfig, mergeConfig } from './config.js'; const __dirname = path.dirname(fileURLToPath(import.meta.url)); const { version: CLI_VERSION } = JSON.parse( - readFileSync(path.resolve(__dirname, '../package.json'), 'utf8'), + readFileSync(path.resolve(__dirname, '../package.json'), 'utf8') ); const getJson = () => program.opts().json; installBrokenPipeHandlers(); +if (process.argv[2] === 'agent') { + await runAgentCli(process.argv.slice(3)); + await flushStdioAndExit(); +} + program .name('git-cas') .description('Content Addressable Storage backed by Git') @@ -98,7 +109,9 @@ async function deriveVaultKey(cas, metadata, passphrase) { * @returns {boolean} */ function hasPassphraseSource(opts) { - return Boolean(opts.vaultPassphraseFile || opts.vaultPassphrase || process.env.GIT_CAS_PASSPHRASE); + return Boolean( + opts.vaultPassphraseFile || opts.vaultPassphrase || process.env.GIT_CAS_PASSPHRASE + ); } /** @@ -117,11 +130,15 @@ async function resolvePassphrase(opts, extra = {}) { return await readPassphraseFile(opts.vaultPassphraseFile); } if (opts.vaultPassphrase) { - if (!opts.vaultPassphrase.trim()) { throw new Error('Passphrase must not be empty'); } + if (!opts.vaultPassphrase.trim()) { + throw new Error('Passphrase must not be empty'); + } return opts.vaultPassphrase; } if (process.env.GIT_CAS_PASSPHRASE) { - if (!process.env.GIT_CAS_PASSPHRASE.trim()) { throw new Error('Passphrase must not be empty'); } + if (!process.env.GIT_CAS_PASSPHRASE.trim()) { + throw new Error('Passphrase must not be empty'); + } return process.env.GIT_CAS_PASSPHRASE; } if (process.stdin.isTTY) { @@ -196,7 +213,9 @@ async function buildStoreOpts(cas, file, opts) { storeOpts.recipients = opts.recipient; } else { const encryptionKey = await resolveEncryptionKey(cas, opts); - if (encryptionKey) { storeOpts.encryptionKey = encryptionKey; } + if (encryptionKey) { + storeOpts.encryptionKey = encryptionKey; + } } return storeOpts; } @@ -227,9 +246,13 @@ function parseRecipient(value, 
previous) { /** @param {string} v */ const parseIntFlag = (v) => { - if (!/^-?\d+$/.test(v)) { throw new Error(`Expected an integer, got "${v}"`); } + if (!/^-?\d+$/.test(v)) { + throw new Error(`Expected an integer, got "${v}"`); + } const n = Number(v); - if (!Number.isSafeInteger(n)) { throw new Error(`Expected a safe integer, got "${v}"`); } + if (!Number.isSafeInteger(n)) { + throw new Error(`Expected a safe integer, got "${v}"`); + } return n; }; @@ -241,7 +264,10 @@ program .option('--recipient ', 'Envelope recipient (repeatable)', parseRecipient) .option('--tree', 'Also create a Git tree and print its OID') .option('--force', 'Overwrite existing vault entry') - .option('--vault-passphrase ', 'Vault-level passphrase for encryption (prefer GIT_CAS_PASSPHRASE env var)') + .option( + '--vault-passphrase ', + 'Vault-level passphrase for encryption (prefer GIT_CAS_PASSPHRASE env var)' + ) .option('--vault-passphrase-file ', 'Read vault passphrase from file (use - for stdin)') .option('--gzip', 'Enable gzip compression') .addOption(new Option('--strategy ', 'Chunking strategy').choices(['fixed', 'cdc'])) @@ -253,37 +279,47 @@ program .option('--max-chunk-size ', 'CDC maximum chunk size', parseIntFlag) .option('--merkle-threshold ', 'Chunk count threshold for Merkle sub-manifests', parseIntFlag) .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {string} */ file, /** @type {Record} */ opts) => { - if (opts.recipient && (opts.keyFile || hasPassphraseSource(opts))) { - throw new Error('Provide --key-file or a vault passphrase source (--vault-passphrase, --vault-passphrase-file, GIT_CAS_PASSPHRASE), or --recipient — not both'); - } - if (opts.force && !opts.tree) { - throw new Error('--force requires --tree'); - } - const json = program.opts().json; - const quiet = program.opts().quiet || json; - const observer = new EventEmitterObserver(); - - const config = loadConfig(opts.cwd); - const { casConfig, storeExtras } = mergeConfig(opts, 
config); - const cas = createCas(opts.cwd, { observability: observer, ...casConfig }); - - const storeOpts = await buildStoreOpts(cas, file, opts); - Object.assign(storeOpts, storeExtras); - const progress = createStoreProgress({ filePath: file, chunkSize: cas.chunkSize, quiet }); - progress.attach(observer); - let manifest; - try { manifest = await cas.storeFile(storeOpts); } finally { progress.detach(); } - - if (opts.tree) { - const treeOid = await cas.createTree({ manifest }); - await cas.addToVault({ slug: opts.slug, treeOid, force: !!opts.force }); - process.stdout.write(json ? `${JSON.stringify({ treeOid })}\n` : `${treeOid}\n`); - } else { - const output = json ? JSON.stringify({ manifest: manifest.toJSON() }) : JSON.stringify(manifest.toJSON(), null, 2); - process.stdout.write(`${output}\n`); - } - }, getJson)); + .action( + runAction(async (/** @type {string} */ file, /** @type {Record} */ opts) => { + if (opts.recipient && (opts.keyFile || hasPassphraseSource(opts))) { + throw new Error( + 'Provide --key-file or a vault passphrase source (--vault-passphrase, --vault-passphrase-file, GIT_CAS_PASSPHRASE), or --recipient — not both' + ); + } + if (opts.force && !opts.tree) { + throw new Error('--force requires --tree'); + } + const json = program.opts().json; + const quiet = program.opts().quiet || json; + const observer = new EventEmitterObserver(); + + const config = loadConfig(opts.cwd); + const { casConfig, storeExtras } = mergeConfig(opts, config); + const cas = createCas(opts.cwd, { observability: observer, ...casConfig }); + + const storeOpts = await buildStoreOpts(cas, file, opts); + Object.assign(storeOpts, storeExtras); + const progress = createStoreProgress({ filePath: file, chunkSize: cas.chunkSize, quiet }); + progress.attach(observer); + let manifest; + try { + manifest = await cas.storeFile(storeOpts); + } finally { + progress.detach(); + } + + if (opts.tree) { + const treeOid = await cas.createTree({ manifest }); + await cas.addToVault({ 
slug: opts.slug, treeOid, force: !!opts.force }); + process.stdout.write(json ? `${JSON.stringify({ treeOid })}\n` : `${treeOid}\n`); + } else { + const output = json + ? JSON.stringify({ manifest: manifest.toJSON() }) + : JSON.stringify(manifest.toJSON(), null, 2); + process.stdout.write(`${output}\n`); + } + }, getJson) + ); // --------------------------------------------------------------------------- // tree @@ -293,18 +329,20 @@ program .description('Create a Git tree from a manifest') .requiredOption('--manifest ', 'Path to manifest JSON file') .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {Record} */ opts) => { - const cas = createCas(opts.cwd); - const raw = readFileSync(opts.manifest, 'utf8'); - const manifest = new Manifest(JSON.parse(raw)); - const treeOid = await cas.createTree({ manifest }); - const json = program.opts().json; - if (json) { - process.stdout.write(`${JSON.stringify({ treeOid })}\n`); - } else { - process.stdout.write(`${treeOid}\n`); - } - }, getJson)); + .action( + runAction(async (/** @type {Record} */ opts) => { + const cas = createCas(opts.cwd); + const raw = readFileSync(opts.manifest, 'utf8'); + const manifest = new Manifest(JSON.parse(raw)); + const treeOid = await cas.createTree({ manifest }); + const json = program.opts().json; + if (json) { + process.stdout.write(`${JSON.stringify({ treeOid })}\n`); + } else { + process.stdout.write(`${treeOid}\n`); + } + }, getJson) + ); // --------------------------------------------------------------------------- // inspect @@ -316,23 +354,25 @@ program .option('--oid ', 'Direct tree OID') .option('--heatmap', 'Show chunk heatmap visualization') .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {Record} */ opts) => { - validateRestoreFlags(opts); - const cas = createCas(opts.cwd); - const treeOid = opts.oid || await cas.resolveVaultEntry({ slug: opts.slug }); - const manifest = await cas.readManifest({ treeOid }); - 
const json = program.opts().json; - - if (json) { - process.stdout.write(`${JSON.stringify(manifest.toJSON())}\n`); - } else if (opts.heatmap) { - process.stdout.write(renderHeatmap({ manifest })); - } else if (process.stdout.isTTY) { - process.stdout.write(renderManifestView({ manifest })); - } else { - process.stdout.write(`${JSON.stringify(manifest.toJSON(), null, 2)}\n`); - } - }, getJson)); + .action( + runAction(async (/** @type {Record} */ opts) => { + validateRestoreFlags(opts); + const cas = createCas(opts.cwd); + const treeOid = opts.oid || (await cas.resolveVaultEntry({ slug: opts.slug })); + const manifest = await cas.readManifest({ treeOid }); + const json = program.opts().json; + + if (json) { + process.stdout.write(`${JSON.stringify(manifest.toJSON())}\n`); + } else if (opts.heatmap) { + process.stdout.write(renderHeatmap({ manifest })); + } else if (process.stdout.isTTY) { + process.stdout.write(renderManifestView({ manifest })); + } else { + process.stdout.write(`${JSON.stringify(manifest.toJSON(), null, 2)}\n`); + } + }, getJson) + ); // --------------------------------------------------------------------------- // restore @@ -344,51 +384,65 @@ program .option('--slug ', 'Resolve tree OID from vault slug') .option('--oid ', 'Direct tree OID') .option('--key-file ', 'Path to 32-byte raw encryption key file') - .option('--vault-passphrase ', 'Vault-level passphrase for decryption (prefer GIT_CAS_PASSPHRASE env var)') + .option( + '--vault-passphrase ', + 'Vault-level passphrase for decryption (prefer GIT_CAS_PASSPHRASE env var)' + ) .option('--vault-passphrase-file ', 'Read vault passphrase from file (use - for stdin)') .option('--concurrency ', 'Parallel chunk I/O operations', parseIntFlag) - .option('--max-restore-buffer ', 'Max bytes for buffered encrypted/compressed restore', parseIntFlag) + .option( + '--max-restore-buffer ', + 'Max bytes for buffered encrypted/compressed restore', + parseIntFlag + ) .option('--cwd ', 'Git working directory', 
'.') - .action(runAction(async (/** @type {Record} */ opts) => { - validateRestoreFlags(opts); - const quiet = program.opts().quiet || program.opts().json; - const observer = new EventEmitterObserver(); - - const config = loadConfig(opts.cwd); - /** @type {Record} */ - const casConfig = {}; - const concurrency = opts.concurrency ?? config.concurrency; - const maxRestoreBufferSize = opts.maxRestoreBuffer ?? config.maxRestoreBufferSize; - if (concurrency !== undefined) { casConfig.concurrency = concurrency; } - if (maxRestoreBufferSize !== undefined) { casConfig.maxRestoreBufferSize = maxRestoreBufferSize; } - - const cas = createCas(opts.cwd, { observability: observer, ...casConfig }); - const treeOid = opts.oid || await cas.resolveVaultEntry({ slug: opts.slug }); - const manifest = await cas.readManifest({ treeOid }); + .action( + runAction(async (/** @type {Record} */ opts) => { + validateRestoreFlags(opts); + const quiet = program.opts().quiet || program.opts().json; + const observer = new EventEmitterObserver(); - const encryptionKey = await resolveEncryptionKey(cas, opts); + const config = loadConfig(opts.cwd); + /** @type {Record} */ + const casConfig = {}; + const concurrency = opts.concurrency ?? config.concurrency; + const maxRestoreBufferSize = opts.maxRestoreBuffer ?? config.maxRestoreBufferSize; + if (concurrency !== undefined) { + casConfig.concurrency = concurrency; + } + if (maxRestoreBufferSize !== undefined) { + casConfig.maxRestoreBufferSize = maxRestoreBufferSize; + } - const progress = createRestoreProgress({ - totalChunks: manifest.chunks.length, quiet, - }); - progress.attach(observer); - let bytesWritten; - try { - ({ bytesWritten } = await cas.restoreFile({ - manifest, - ...(encryptionKey ? 
{ encryptionKey } : {}), - outputPath: opts.out, - })); - } finally { - progress.detach(); - } - const json = program.opts().json; - if (json) { - process.stdout.write(`${JSON.stringify({ bytesWritten })}\n`); - } else { - process.stdout.write(`${bytesWritten}\n`); - } - }, getJson)); + const cas = createCas(opts.cwd, { observability: observer, ...casConfig }); + const treeOid = opts.oid || (await cas.resolveVaultEntry({ slug: opts.slug })); + const manifest = await cas.readManifest({ treeOid }); + + const encryptionKey = await resolveEncryptionKey(cas, opts); + + const progress = createRestoreProgress({ + totalChunks: manifest.chunks.length, + quiet, + }); + progress.attach(observer); + let bytesWritten; + try { + ({ bytesWritten } = await cas.restoreFile({ + manifest, + ...(encryptionKey ? { encryptionKey } : {}), + outputPath: opts.out, + })); + } finally { + progress.detach(); + } + const json = program.opts().json; + if (json) { + process.stdout.write(`${JSON.stringify({ bytesWritten })}\n`); + } else { + process.stdout.write(`${bytesWritten}\n`); + } + }, getJson) + ); // --------------------------------------------------------------------------- // verify @@ -399,22 +453,26 @@ program .option('--slug ', 'Resolve tree OID from vault slug') .option('--oid ', 'Direct tree OID') .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {Record} */ opts) => { - validateRestoreFlags(opts); - const cas = createCas(opts.cwd); - const treeOid = opts.oid || await cas.resolveVaultEntry({ slug: opts.slug }); - const manifest = await cas.readManifest({ treeOid }); - const ok = await cas.verifyIntegrity(manifest); - const json = program.opts().json; - if (json) { - process.stdout.write(`${JSON.stringify({ ok, slug: manifest.slug, chunks: manifest.chunks.length })}\n`); - } else { - process.stdout.write(ok ? 
'ok\n' : `fail: ${manifest.slug}\n`); - } - if (!ok) { - process.exitCode = 1; - } - }, getJson)); + .action( + runAction(async (/** @type {Record} */ opts) => { + validateRestoreFlags(opts); + const cas = createCas(opts.cwd); + const treeOid = opts.oid || (await cas.resolveVaultEntry({ slug: opts.slug })); + const manifest = await cas.readManifest({ treeOid }); + const ok = await cas.verifyIntegrity(manifest); + const json = program.opts().json; + if (json) { + process.stdout.write( + `${JSON.stringify({ ok, slug: manifest.slug, chunks: manifest.chunks.length })}\n` + ); + } else { + process.stdout.write(ok ? 'ok\n' : `fail: ${manifest.slug}\n`); + } + if (!ok) { + process.exitCode = 1; + } + }, getJson) + ); // --------------------------------------------------------------------------- // doctor @@ -423,53 +481,60 @@ program .command('doctor') .description('Inspect vault health and surface integrity issues') .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {Record} */ opts) => { - const cas = createCas(opts.cwd); - const report = await inspectVaultHealth(cas); - const json = program.opts().json; - - if (json) { - process.stdout.write(`${JSON.stringify(report)}\n`); - } else { - process.stdout.write(renderDoctorReport(report)); - } + .action( + runAction(async (/** @type {Record} */ opts) => { + const cas = createCas(opts.cwd); + const report = await inspectVaultHealth(cas); + const json = program.opts().json; - if (report.status !== 'ok') { - process.exitCode = 1; - } - }, getJson)); + if (json) { + process.stdout.write(`${JSON.stringify(report)}\n`); + } else { + process.stdout.write(renderDoctorReport(report)); + } + + if (report.status !== 'ok') { + process.exitCode = 1; + } + }, getJson) + ); // --------------------------------------------------------------------------- // vault init // --------------------------------------------------------------------------- -const vault = program - .command('vault') - 
.description('Manage the CAS vault'); +const vault = program.command('vault').description('Manage the CAS vault'); vault .command('init') .description('Initialize the vault') - .option('--vault-passphrase ', 'Passphrase for vault-level encryption (prefer GIT_CAS_PASSPHRASE env var)') + .option( + '--vault-passphrase ', + 'Passphrase for vault-level encryption (prefer GIT_CAS_PASSPHRASE env var)' + ) .option('--vault-passphrase-file ', 'Read vault passphrase from file (use - for stdin)') - .addOption(new Option('--algorithm ', 'KDF algorithm').choices(['pbkdf2', 'scrypt']).default('pbkdf2')) + .addOption( + new Option('--algorithm ', 'KDF algorithm').choices(['pbkdf2', 'scrypt']).default('pbkdf2') + ) .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {Record} */ opts) => { - const cas = createCas(opts.cwd); - /** @type {{ passphrase?: string, kdfOptions?: { algorithm: 'pbkdf2' | 'scrypt' } }} */ - const initOpts = {}; - const passphrase = await resolvePassphrase(opts, { confirm: true }); - if (passphrase) { - initOpts.passphrase = passphrase; - initOpts.kdfOptions = { algorithm: /** @type {'pbkdf2' | 'scrypt'} */ (opts.algorithm) }; - } - const { commitOid } = await cas.initVault(initOpts); - const json = program.opts().json; - if (json) { - process.stdout.write(`${JSON.stringify({ commitOid })}\n`); - } else { - process.stdout.write(`${commitOid}\n`); - } - }, getJson)); + .action( + runAction(async (/** @type {Record} */ opts) => { + const cas = createCas(opts.cwd); + /** @type {{ passphrase?: string, kdfOptions?: { algorithm: 'pbkdf2' | 'scrypt' } }} */ + const initOpts = {}; + const passphrase = await resolvePassphrase(opts, { confirm: true }); + if (passphrase) { + initOpts.passphrase = passphrase; + initOpts.kdfOptions = { algorithm: /** @type {'pbkdf2' | 'scrypt'} */ (opts.algorithm) }; + } + const { commitOid } = await cas.initVault(initOpts); + const json = program.opts().json; + if (json) { + 
process.stdout.write(`${JSON.stringify({ commitOid })}\n`); + } else { + process.stdout.write(`${commitOid}\n`); + } + }, getJson) + ); // --------------------------------------------------------------------------- // vault list @@ -479,19 +544,21 @@ vault .description('List vault entries') .option('--filter ', 'Filter entries by glob pattern') .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {Record} */ opts) => { - const cas = createCas(opts.cwd); - const all = await cas.listVault(); - const entries = filterEntries(all, opts.filter); - const json = program.opts().json; - if (json) { - process.stdout.write(`${JSON.stringify(entries)}\n`); - } else if (process.stdout.isTTY) { - process.stdout.write(formatTable(entries)); - } else { - process.stdout.write(formatTabSeparated(entries)); - } - }, getJson)); + .action( + runAction(async (/** @type {Record} */ opts) => { + const cas = createCas(opts.cwd); + const all = await cas.listVault(); + const entries = filterEntries(all, opts.filter); + const json = program.opts().json; + if (json) { + process.stdout.write(`${JSON.stringify(entries)}\n`); + } else if (process.stdout.isTTY) { + process.stdout.write(formatTable(entries)); + } else { + process.stdout.write(formatTabSeparated(entries)); + } + }, getJson) + ); // --------------------------------------------------------------------------- // vault stats @@ -501,23 +568,25 @@ vault .description('Summarize vault size, dedupe, and encryption coverage') .option('--filter ', 'Filter entries by glob pattern') .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {Record} */ opts) => { - const cas = createCas(opts.cwd); - const all = await cas.listVault(); - const entries = filterEntries(all, opts.filter); - const records = []; - for (const entry of entries) { - const manifest = await cas.readManifest({ treeOid: entry.treeOid }); - records.push({ ...entry, manifest }); - } - const stats = 
buildVaultStats(records); - const json = program.opts().json; - if (json) { - process.stdout.write(`${JSON.stringify(stats)}\n`); - } else { - process.stdout.write(renderVaultStats(stats)); - } - }, getJson)); + .action( + runAction(async (/** @type {Record} */ opts) => { + const cas = createCas(opts.cwd); + const all = await cas.listVault(); + const entries = filterEntries(all, opts.filter); + const records = []; + for (const entry of entries) { + const manifest = await cas.readManifest({ treeOid: entry.treeOid }); + records.push({ ...entry, manifest }); + } + const stats = buildVaultStats(records); + const json = program.opts().json; + if (json) { + process.stdout.write(`${JSON.stringify(stats)}\n`); + } else { + process.stdout.write(renderVaultStats(stats)); + } + }, getJson) + ); // --------------------------------------------------------------------------- // vault remove @@ -526,16 +595,18 @@ vault .command('remove ') .description('Remove an entry from the vault') .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {string} */ slug, /** @type {Record} */ opts) => { - const cas = createCas(opts.cwd); - const { commitOid, removedTreeOid } = await cas.removeFromVault({ slug }); - const json = program.opts().json; - if (json) { - process.stdout.write(`${JSON.stringify({ commitOid, removedTreeOid })}\n`); - } else { - process.stdout.write(`${removedTreeOid}\n`); - } - }, getJson)); + .action( + runAction(async (/** @type {string} */ slug, /** @type {Record} */ opts) => { + const cas = createCas(opts.cwd); + const { commitOid, removedTreeOid } = await cas.removeFromVault({ slug }); + const json = program.opts().json; + if (json) { + process.stdout.write(`${JSON.stringify({ commitOid, removedTreeOid })}\n`); + } else { + process.stdout.write(`${removedTreeOid}\n`); + } + }, getJson) + ); // --------------------------------------------------------------------------- // vault info @@ -545,29 +616,31 @@ vault .description('Show info for 
a vault entry') .option('--encryption', 'Show vault encryption details') .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {string} */ slug, /** @type {Record} */ opts) => { - const cas = createCas(opts.cwd); - const treeOid = await cas.resolveVaultEntry({ slug }); - const json = program.opts().json; - if (json) { - /** @type {Record} */ - const result = { slug, treeOid }; - if (opts.encryption) { - const metadata = await cas.getVaultMetadata(); - if (metadata?.encryption) { - result.encryption = metadata.encryption; + .action( + runAction(async (/** @type {string} */ slug, /** @type {Record} */ opts) => { + const cas = createCas(opts.cwd); + const treeOid = await cas.resolveVaultEntry({ slug }); + const json = program.opts().json; + if (json) { + /** @type {Record} */ + const result = { slug, treeOid }; + if (opts.encryption) { + const metadata = await cas.getVaultMetadata(); + if (metadata?.encryption) { + result.encryption = metadata.encryption; + } + } + process.stdout.write(`${JSON.stringify(result)}\n`); + } else { + process.stdout.write(`slug\t${slug}\n`); + process.stdout.write(`tree\t${treeOid}\n`); + if (opts.encryption) { + const metadata = await cas.getVaultMetadata(); + process.stdout.write(`\n${renderEncryptionCard({ metadata })}\n`); } } - process.stdout.write(`${JSON.stringify(result)}\n`); - } else { - process.stdout.write(`slug\t${slug}\n`); - process.stdout.write(`tree\t${treeOid}\n`); - if (opts.encryption) { - const metadata = await cas.getVaultMetadata(); - process.stdout.write(`\n${renderEncryptionCard({ metadata })}\n`); - } - } - }, getJson)); + }, getJson) + ); // --------------------------------------------------------------------------- // vault history @@ -578,33 +651,35 @@ vault .option('--cwd ', 'Git working directory', '.') .option('-n, --max-count ', 'Limit number of commits') .option('--pretty', 'Render as color-coded timeline') - .action(runAction(async (/** @type {Record} */ opts) => { - const 
plumbing = createGitPlumbing({ cwd: opts.cwd || '.' }); - const args = ['log', '--oneline', ContentAddressableStore.VAULT_REF]; - if (opts.maxCount) { - const n = parseInt(opts.maxCount, 10); - if (Number.isNaN(n) || n <= 0) { - throw new Error('--max-count must be a positive integer'); + .action( + runAction(async (/** @type {Record} */ opts) => { + const plumbing = createGitPlumbing({ cwd: opts.cwd || '.' }); + const args = ['log', '--oneline', ContentAddressableStore.VAULT_REF]; + if (opts.maxCount) { + const n = parseInt(opts.maxCount, 10); + if (Number.isNaN(n) || n <= 0) { + throw new Error('--max-count must be a positive integer'); + } + args.push(`-${n}`); } - args.push(`-${n}`); - } - const output = await plumbing.execute({ args }); - const json = program.opts().json; - if (json) { - const history = output - .split('\n') - .filter(Boolean) - .map((/** @type {string} */ line) => { - const [commitOid, ...messageParts] = line.trim().split(/\s+/); - return { commitOid, message: messageParts.join(' ') }; - }); - process.stdout.write(`${JSON.stringify(history)}\n`); - } else if (opts.pretty && process.stdout.isTTY) { - process.stdout.write(`${renderHistoryTimeline(output)}\n`); - } else { - process.stdout.write(`${output}\n`); - } - }, getJson)); + const output = await plumbing.execute({ args }); + const json = program.opts().json; + if (json) { + const history = output + .split('\n') + .filter(Boolean) + .map((/** @type {string} */ line) => { + const [commitOid, ...messageParts] = line.trim().split(/\s+/); + return { commitOid, message: messageParts.join(' ') }; + }); + process.stdout.write(`${JSON.stringify(history)}\n`); + } else if (opts.pretty && process.stdout.isTTY) { + process.stdout.write(`${renderHistoryTimeline(output)}\n`); + } else { + process.stdout.write(`${output}\n`); + } + }, getJson) + ); // --------------------------------------------------------------------------- // vault rotate @@ -625,8 +700,12 @@ async function 
resolveRotatePassphrases(opts) { const newPassphrase = opts.newPassphraseFile ? await readPassphraseFile(opts.newPassphraseFile) : opts.newPassphrase; - if (!oldPassphrase) { throw new Error('Old passphrase required (--old-passphrase or --old-passphrase-file)'); } - if (!newPassphrase) { throw new Error('New passphrase required (--new-passphrase or --new-passphrase-file)'); } + if (!oldPassphrase) { + throw new Error('Old passphrase required (--old-passphrase or --old-passphrase-file)'); + } + if (!newPassphrase) { + throw new Error('New passphrase required (--new-passphrase or --new-passphrase-file)'); + } return { oldPassphrase, newPassphrase }; } @@ -639,31 +718,33 @@ vault .option('--new-passphrase-file ', 'Read new passphrase from file (- for stdin)') .addOption(new Option('--algorithm ', 'KDF algorithm').choices(['pbkdf2', 'scrypt'])) .option('--cwd ', 'Git working directory', '.') - .action(runAction(async (/** @type {Record} */ opts) => { - const { oldPassphrase, newPassphrase } = await resolveRotatePassphrases(opts); - const cas = createCas(opts.cwd); - /** @type {{ oldPassphrase: string, newPassphrase: string, kdfOptions?: { algorithm: 'pbkdf2' | 'scrypt' } }} */ - const rotateOpts = { - oldPassphrase, - newPassphrase, - }; - if (opts.algorithm) { - rotateOpts.kdfOptions = { algorithm: /** @type {'pbkdf2' | 'scrypt'} */ (opts.algorithm) }; - } - const { commitOid, rotatedSlugs, skippedSlugs } = await cas.rotateVaultPassphrase(rotateOpts); - const json = program.opts().json; - if (json) { - process.stdout.write(`${JSON.stringify({ commitOid, rotatedSlugs, skippedSlugs })}\n`); - } else { - process.stdout.write(`${commitOid}\n`); - if (rotatedSlugs.length) { - process.stderr.write(`rotated: ${rotatedSlugs.join(', ')}\n`); + .action( + runAction(async (/** @type {Record} */ opts) => { + const { oldPassphrase, newPassphrase } = await resolveRotatePassphrases(opts); + const cas = createCas(opts.cwd); + /** @type {{ oldPassphrase: string, newPassphrase: string, 
kdfOptions?: { algorithm: 'pbkdf2' | 'scrypt' } }} */ + const rotateOpts = { + oldPassphrase, + newPassphrase, + }; + if (opts.algorithm) { + rotateOpts.kdfOptions = { algorithm: /** @type {'pbkdf2' | 'scrypt'} */ (opts.algorithm) }; } - if (skippedSlugs.length) { - process.stderr.write(`skipped: ${skippedSlugs.join(', ')}\n`); + const { commitOid, rotatedSlugs, skippedSlugs } = await cas.rotateVaultPassphrase(rotateOpts); + const json = program.opts().json; + if (json) { + process.stdout.write(`${JSON.stringify({ commitOid, rotatedSlugs, skippedSlugs })}\n`); + } else { + process.stdout.write(`${commitOid}\n`); + if (rotatedSlugs.length) { + process.stderr.write(`rotated: ${rotatedSlugs.join(', ')}\n`); + } + if (skippedSlugs.length) { + process.stderr.write(`skipped: ${skippedSlugs.join(', ')}\n`); + } } - } - }, getJson)); + }, getJson) + ); // --------------------------------------------------------------------------- // vault dashboard @@ -672,21 +753,26 @@ vault .command('dashboard') .description('Interactive CAS explorer') .option('--cwd ', 'Git working directory', '.') - .option('--ref ', 'Inspect a git ref that points to a CAS tree, CAS index blob, or commit with a manifest hint') + .option( + '--ref ', + 'Inspect a git ref that points to a CAS tree, CAS index blob, or commit with a manifest hint' + ) .option('--oid ', 'Inspect a direct CAS tree OID') - .action(runAction(async (/** @type {Record} */ opts) => { - if (opts.ref && opts.oid) { - throw new Error('Choose either --ref or --oid, not both'); - } - const cas = createCas(opts.cwd); - const { launchDashboard } = await import('./ui/dashboard.js'); - const source = opts.ref - ? { type: 'ref', ref: opts.ref } - : opts.oid - ? 
{ type: 'oid', treeOid: opts.oid } - : { type: 'vault' }; - await launchDashboard(cas, { cwd: path.resolve(opts.cwd), source }); - }, getJson)); + .action( + runAction(async (/** @type {Record} */ opts) => { + if (opts.ref && opts.oid) { + throw new Error('Choose either --ref or --oid, not both'); + } + const cas = createCas(opts.cwd); + const { launchDashboard } = await import('./ui/dashboard.js'); + const source = opts.ref + ? { type: 'ref', ref: opts.ref } + : opts.oid + ? { type: 'oid', treeOid: opts.oid } + : { type: 'vault' }; + await launchDashboard(cas, { cwd: path.resolve(opts.cwd), source }); + }, getJson) + ); // --------------------------------------------------------------------------- // rotate @@ -700,43 +786,47 @@ program .requiredOption('--new-key-file ', 'Path to new 32-byte key file') .option('--label